diff --git a/.github/PULL_REQUEST_TEMPLATE/feature_stabilization.md b/.github/PULL_REQUEST_TEMPLATE/feature_stabilization.md
index dd71eb2368f..2f566a07c6f 100644
--- a/.github/PULL_REQUEST_TEMPLATE/feature_stabilization.md
+++ b/.github/PULL_REQUEST_TEMPLATE/feature_stabilization.md
@@ -11,5 +11,5 @@ Feel free to link other pull requests or issues here.
 Describe the testing plan for this protocol and why you are confident that it is ready to be stabilized.
 
 # Checklist
-- [ ] Link to nightly nayduck run (`./scripts/nayduck.py`, [docs](https://github.com/near/nearcore/blob/master/nightly/README.md#scheduling-a-run)): https://nayduck.near.org/
+- [ ] Link to nightly nayduck run (`./scripts/nayduck.py`, [docs](https://github.com/near/nearcore/blob/master/nightly/README.md#scheduling-a-run)): https://nayduck.nearone.org/
 - [ ] Update CHANGELOG.md to include this protocol feature in the `Unreleased` section.
diff --git a/.github/workflows/nightly_nayduck.yml b/.github/workflows/nightly_nayduck.yml
index f692428f16b..0f22a8d55f8 100644
--- a/.github/workflows/nightly_nayduck.yml
+++ b/.github/workflows/nightly_nayduck.yml
@@ -15,7 +15,7 @@ jobs:
       # and check if there are any non-passing tests
       - name: Check if there are any non-passing tests
         run: |
-          NIGHTLY_RESULTS=$(curl -s https://nayduck.near.org/api/nightly-events)
+          NIGHTLY_RESULTS=$(curl -s https://nayduck.nearone.org/api/nightly-events)
           UNSUCCESSFUL_TESTS=$(jq -e '.tests | .[][] | select(.[2] != "PASSED" ) ' <<< ${NIGHTLY_RESULTS} )
           if [ -z "$UNSUCCESSFUL_TESTS" ] ; then echo "Nightly Nayduck tests OK"; \
           else echo "Nightly Nayduck tests are failing" && exit 1; fi
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1b1f7425d6f..9b2fc9d99ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,14 @@
 
 ### Non-protocol Changes
 
+## 1.39.0
+
+### Protocol Changes
+
+* Use more precise gas costs for function calls [#10943](https://github.com/near/nearcore/pull/10943), which should lead to more efficient chunk utilization.
+ +### Non-protocol Changes + ## 1.37.0 ### Protocol Changes diff --git a/Cargo.lock b/Cargo.lock index 7f409565670..fff9689e142 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", - "tokio-util 0.7.2", + "tokio-util", ] [[package]] @@ -40,7 +40,7 @@ dependencies = [ "memchr", "pin-project-lite", "tokio", - "tokio-util 0.7.2", + "tokio-util", ] [[package]] @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.3.1" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2079246596c18b4a33e274ae10c0e50613f4d32a4198e09c7b93771013fed74" +checksum = "d223b13fd481fc0d1f83bb12659ae774d9e3601814c68a0bc539731698cca743" dependencies = [ "actix-codec", "actix-rt", @@ -70,7 +70,7 @@ dependencies = [ "actix-utils", "ahash 0.8.8", "base64 0.21.0", - "bitflags 1.3.2", + "bitflags 2.4.1", "brotli", "bytes", "bytestring", @@ -92,7 +92,7 @@ dependencies = [ "sha1", "smallvec", "tokio", - "tokio-util 0.7.2", + "tokio-util", "tracing", "zstd", ] @@ -177,7 +177,7 @@ dependencies = [ "openssl", "pin-project-lite", "tokio-openssl", - "tokio-util 0.7.2", + "tokio-util", ] [[package]] @@ -434,6 +434,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "assert_matches" version = "1.5.0" @@ -489,6 +495,83 @@ dependencies = [ "wildmatch", ] +[[package]] +name = "aurora-engine-modexp" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?tag=3.6.1#26a126231e1ce338598a7d9909779f32e4dce8a2" +dependencies = [ + "hex", + "num", +] + +[[package]] +name = "aurora-engine-precompiles" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?tag=3.6.1#26a126231e1ce338598a7d9909779f32e4dce8a2" +dependencies = [ + "aurora-engine-modexp", + "aurora-engine-sdk", + "aurora-engine-types", + "ethabi", + "evm", + "hex", + "libsecp256k1", + "num", + "ripemd", + "sha2 0.10.6", + "sha3", + "zeropool-bn", +] + +[[package]] +name = "aurora-engine-sdk" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?tag=3.6.1#26a126231e1ce338598a7d9909779f32e4dce8a2" +dependencies = [ + "aurora-engine-types", + "base64 0.21.0", + "sha2 0.10.6", + "sha3", +] + +[[package]] +name = "aurora-engine-transactions" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?tag=3.6.1#26a126231e1ce338598a7d9909779f32e4dce8a2" +dependencies = [ + "aurora-engine-precompiles", + "aurora-engine-sdk", + "aurora-engine-types", + "evm", + "rlp", +] + +[[package]] +name = "aurora-engine-types" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?tag=3.6.1#26a126231e1ce338598a7d9909779f32e4dce8a2" +dependencies = [ + "base64 0.21.0", + "borsh 0.10.3", + "bs58 0.5.1", + "hex", + "primitive-types 0.12.2", + "rlp", + "serde", + "serde_json", +] + +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -555,6 
+638,51 @@ dependencies = [ "thiserror", ] +[[package]] +name = "axum" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.67" @@ -684,7 +812,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.5.2", "cc", "cfg-if 0.1.10", "constant_time_eq", @@ -692,6 +820,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.5", +] + [[package]] name = "block-buffer" version = "0.10.2" @@ -805,6 +942,16 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "borsh" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +dependencies = [ + "borsh-derive 0.10.3", + "hashbrown 0.13.2", +] + [[package]] name = "borsh" version = "1.0.0" @@ -821,8 +968,21 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.103", +] + +[[package]] +name = "borsh-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +dependencies = [ + "borsh-derive-internal 0.10.3", + "borsh-schema-derive-internal 0.10.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.103", @@ -853,6 +1013,17 @@ dependencies = [ "syn 1.0.103", ] +[[package]] +name = "borsh-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "borsh-schema-derive-internal" version = "0.9.3" @@ -864,6 +1035,17 @@ dependencies = [ "syn 1.0.103", ] +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "brotli" version = "3.3.4" @@ -891,12 +1073,28 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "sha2 0.10.6", + "tinyvec", +] + [[package]] name = "bumpalo" version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + [[package]] name = "bytecheck" version = "0.6.8" @@ -1141,7 +1339,7 @@ version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "syn 2.0.32", @@ -1236,6 +1434,8 @@ dependencies = [ "chrono", "clap", "csv", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1813,7 +2013,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer", + "block-buffer 0.10.2", "crypto-common", "subtle", ] @@ -1933,7 +2133,7 @@ dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", - "sha2", + "sha2 0.10.6", "subtle", ] @@ -2063,6 +2263,114 @@ dependencies = [ "xshell", ] +[[package]] +name = "ethabi" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" +dependencies = [ + "ethereum-types", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +dependencies = [ + "crunchy", + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "impl-serde", + "scale-info", + "tiny-keccak", +] + +[[package]] +name = "ethereum" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a89fb87a9e103f71b903b80b670200b54cc67a07578f070681f1fffb7396fb7" +dependencies = [ + "bytes", + "ethereum-types", + "hash-db", + "hash256-std-hasher", + "rlp", + "sha3", + "triehash", +] + +[[package]] +name = "ethereum-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +dependencies = [ + "ethbloom", + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "impl-serde", + "primitive-types 0.12.2", + "scale-info", + "uint", +] + +[[package]] +name = "evm" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "auto_impl", + "ethereum", + "evm-core", + "evm-gasometer", + "evm-runtime", + "log", + "primitive-types 0.12.2", + "rlp", + "sha3", +] + +[[package]] +name = "evm-core" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "primitive-types 0.12.2", +] + 
+[[package]] +name = "evm-gasometer" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "evm-core", + "evm-runtime", + "primitive-types 0.12.2", +] + +[[package]] +name = "evm-runtime" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "auto_impl", + "evm-core", + "primitive-types 0.12.2", + "sha3", +] + [[package]] name = "expect-test" version = "1.3.0" @@ -2135,10 +2443,16 @@ dependencies = [ ] [[package]] -name = "fixedbitset" -version = "0.4.2" +name = "fixed-hash" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand", + "rustc-hex", + "static_assertions", +] [[package]] name = "flagset" @@ -2418,15 +2732,15 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -2437,7 +2751,7 @@ dependencies = [ "indexmap 2.0.0", "slab", "tokio", - "tokio-util 0.7.2", + "tokio-util", "tracing", ] @@ -2447,6 +2761,21 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -2502,15 +2831,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -2566,7 +2886,17 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "hmac", + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", ] [[package]] @@ -2578,11 +2908,22 @@ dependencies = [ "digest 0.10.6", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.5", + "hmac 0.8.1", +] + [[package]] name = "http" -version = "0.2.7" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -2591,9 +2932,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -2602,9 +2943,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2614,9 +2955,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2715,6 +3056,44 @@ dependencies = [ "version_check", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "indexer-example" version = "0.0.0" @@ -2788,12 +3167,15 @@ dependencies = [ "actix-rt", "anyhow", "assert_matches", + "aurora-engine-transactions", + "aurora-engine-types", "borsh 1.0.0", "bytesize", "chrono", "clap", "derive-enum-from-into", "derive_more", + "ethabi", "futures", "hex", "insta", @@ -2828,11 +3210,12 @@ dependencies = [ "node-runtime", "once_cell", "parking_lot 0.12.1", - "primitive-types", + "primitive-types 0.10.1", "rand", "rlp", "serde", "serde_json", + "sha3", "smart-default", "strum", "tempfile", @@ -2882,9 +3265,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" 
[[package]] name = "jobserver" @@ -2897,9 +3280,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -3010,6 +3393,54 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.0", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsqlite3-sys" version = "0.26.0" @@ -3168,6 +3599,12 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "maybe-async" version = "0.2.6" @@ -3358,12 +3795,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - [[package]] name = "native-tls" version = "0.2.10" @@ -3413,7 +3844,7 @@ dependencies = [ "near-crypto", "near-primitives", "near-primitives-core", - "num-rational", + "num-rational 0.3.2", "serde", "serde_json", "tempfile", @@ -3436,7 +3867,7 @@ dependencies = [ "serde_json", "time", "tokio", - "tokio-util 0.7.2", + "tokio-util", ] [[package]] @@ -3492,9 +3923,9 @@ dependencies = [ "near-store", "near-vm-runner", "node-runtime", - "num-rational", + "num-rational 0.3.2", "once_cell", - "primitive-types", + "primitive-types 0.10.1", "rand", "rand_chacha", "rayon", @@ -3520,11 +3951,11 @@ dependencies = [ "near-o11y", "near-parameters", "near-primitives", - "num-rational", + "num-rational 0.3.2", "once_cell", "serde", "serde_json", - "sha2", + "sha2 0.10.6", "smart-default", "time", "tracing", @@ -3622,7 +4053,7 @@ dependencies = [ "near-store", "near-telemetry", "near-vm-runner", - "num-rational", + "num-rational 0.3.2", "once_cell", "percent-encoding", "rand", @@ -3680,7 +4111,7 @@ dependencies = [ "blake2", "bolero", "borsh 1.0.0", - "bs58", + "bs58 0.4.0", "curve25519-dalek", "derive_more", "ed25519-dalek", @@ -3690,11 +4121,11 @@ 
dependencies = [ "near-config-utils", "near-stdx", "once_cell", - "primitive-types", + "primitive-types 0.10.1", "secp256k1", "serde", "serde_json", - "sha2", + "sha2 0.10.6", "subtle", "tempfile", "thiserror", @@ -3753,9 +4184,9 @@ dependencies = [ "near-o11y", "near-primitives", "near-store", - "num-rational", + "num-rational 0.3.2", "once_cell", - "primitive-types", + "primitive-types 0.10.1", "rand", "rand_hc", "serde_json", @@ -3872,7 +4303,7 @@ dependencies = [ "actix", "actix-cors", "actix-web", - "bs58", + "bs58 0.4.0", "derive_more", "easy-ext", "futures", @@ -3995,7 +4426,7 @@ dependencies = [ "anyhow", "async-trait", "borsh 1.0.0", - "bs58", + "bs58 0.4.0", "clap", "ed25519-dalek", "hex", @@ -4023,7 +4454,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "sha2", + "sha2 0.10.6", "strum", "thiserror", "tokio", @@ -4072,7 +4503,7 @@ dependencies = [ "rayon", "rlimit", "serde", - "sha2", + "sha2 0.10.6", "smart-default", "strum", "stun", @@ -4081,7 +4512,7 @@ dependencies = [ "time", "tokio", "tokio-stream", - "tokio-util 0.7.2", + "tokio-util", "tracing", "turn", "webrtc-util", @@ -4103,6 +4534,7 @@ dependencies = [ "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", + "opentelemetry_sdk", "prometheus", "serde", "serde_json", @@ -4127,7 +4559,7 @@ dependencies = [ "insta", "near-account-id", "near-primitives-core", - "num-rational", + "num-rational 0.3.2", "serde", "serde_repr", "serde_yaml", @@ -4147,7 +4579,7 @@ dependencies = [ "libc", "once_cell", "tokio", - "tokio-util 0.7.2", + "tokio-util", "tracing", ] @@ -4200,6 +4632,7 @@ dependencies = [ "bencher", "bolero", "borsh 1.0.0", + "bytes", "bytesize", "cfg-if 1.0.0", "chrono", @@ -4218,9 +4651,9 @@ dependencies = [ "near-rpc-error-macro", "near-stdx", "near-vm-runner", - "num-rational", + "num-rational 0.3.2", "once_cell", - "primitive-types", + "primitive-types 0.10.1", "rand", "rand_chacha", "reed-solomon-erasure", @@ -4233,6 +4666,7 @@ dependencies = [ "strum", "thiserror", "tracing", + "zstd", ] [[package]] @@ -4242,17 +4676,17 @@ dependencies = [ "arbitrary", "base64 0.21.0", "borsh 1.0.0", - "bs58", + "bs58 0.4.0", "derive_more", "enum-map", "expect-test", "insta", "near-account-id", - "num-rational", + "num-rational 0.3.2", "serde", "serde_json", "serde_repr", - "sha2", + "sha2 0.10.6", "thiserror", ] @@ -4324,7 +4758,7 @@ dependencies = [ "near-ping", "near-primitives", "once_cell", - "sha2", + "sha2 0.10.6", "tokio", "tracing", ] @@ -4547,7 +4981,7 @@ dependencies = [ "near-vm-engine", "near-vm-types", "near-vm-vm", - "num-rational", + "num-rational 0.3.2", "once_cell", "parity-wasm 0.41.0", "parity-wasm 0.42.2", @@ -4559,7 +4993,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2", + "sha2 0.10.6", "sha3", "strum", "tempfile", @@ -4728,9 +5162,9 @@ dependencies = [ "near-telemetry", "near-vm-runner", "node-runtime", - "num-rational", + "num-rational 0.3.2", "once_cell", - "primitive-types", + "primitive-types 0.10.1", "rand", "rayon", "regex", @@ -4842,14 +5276,14 @@ dependencies = [ "near-vm-runner", "near-wallet-contract", "num-bigint 0.3.3", - "num-rational", + "num-rational 0.3.2", "num-traits", "once_cell", "rand", "rayon", "serde", "serde_json", - "sha2", + "sha2 0.10.6", "tempfile", "testlib", "thiserror", @@ -4885,6 +5319,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +dependencies = 
[ + "num-bigint 0.4.3", + "num-complex", + "num-integer", + "num-iter", + "num-rational 0.4.1", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -4907,6 +5355,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +dependencies = [ + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -4917,6 +5385,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.3.2" @@ -4930,6 +5409,18 @@ dependencies = [ "serde", ] +[[package]] +name = "num-rational" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +dependencies = [ + "autocfg", + "num-bigint 0.4.3", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.15" @@ -5032,37 +5523,90 @@ version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ - "cc", + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +dependencies = [ + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb" +dependencies = [ + "async-trait", + "futures-core", + "http", + "opentelemetry", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", ] [[package]] -name = "openssl-sys" -version = "0.9.96" +name = "opentelemetry-semantic-conventions" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" -dependencies = [ - "cc", - "libc", - "openssl-src", - "pkg-config", - "vcpkg", -] +checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910" 
[[package]] -name = "opentelemetry" -version = "0.17.0" +name = "opentelemetry_sdk" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e" dependencies = [ "async-trait", "crossbeam-channel", "futures-channel", "futures-executor", "futures-util", - "js-sys", - "lazy_static", + "glob", + "once_cell", + "opentelemetry", + "ordered-float", "percent-encoding", - "pin-project", "rand", "thiserror", "tokio", @@ -5070,30 +5614,12 @@ dependencies = [ ] [[package]] -name = "opentelemetry-otlp" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" -dependencies = [ - "async-trait", - "futures", - "futures-util", - "http", - "opentelemetry", - "prost", - "thiserror", - "tokio", - "tonic", - "tonic-build", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.9.0" +name = "ordered-float" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ - "opentelemetry", + "num-traits", ] [[package]] @@ -5183,7 +5709,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0e23a129dc95a45661cbbfac1b8f865094131da7937738a343d00f47d87fada" dependencies = [ - "heck 0.4.0", + "heck", "http", "lazy_static", "mime", @@ -5195,6 +5721,32 @@ dependencies = [ "syn 1.0.103", ] +[[package]] +name = "parity-scale-codec" +version = "3.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +dependencies = [ + "arrayvec 0.7.4", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +dependencies = [ + "proc-macro-crate 2.0.2", + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "parity-wasm" version = "0.41.0" @@ -5327,34 +5879,24 @@ dependencies = [ "sha1", ] -[[package]] -name = "petgraph" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" -dependencies = [ - "fixedbitset", - "indexmap 1.9.2", -] - [[package]] name = "pin-project" -version = "1.0.10" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 1.0.103", + "syn 2.0.32", ] [[package]] @@ -5425,7 +5967,21 @@ version = "0.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" dependencies = [ - "fixed-hash", + "fixed-hash 0.7.0", + "uint", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "impl-serde", + "scale-info", "uint", ] @@ -5445,7 +6001,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" +dependencies = [ + "toml_datetime", + "toml_edit 0.20.2", ] [[package]] @@ -5504,55 +6070,25 @@ dependencies = [ [[package]] name = "prost" -version = "0.9.0" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", "prost-derive", ] -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - "which", -] - [[package]] name = "prost-derive" -version = "0.9.0" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn 1.0.103", -] - -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost", + "syn 2.0.32", ] [[package]] @@ -5902,7 +6438,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-util 0.7.2", + "tokio-util", "tower-service", "url", "wasm-bindgen", @@ -5992,9 +6528,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", + "rlp-derive", "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -6011,7 +6559,7 @@ version = "0.0.0" dependencies = [ "anyhow", "borsh 1.0.0", - "bs58", + "bs58 0.4.0", "bytesize", "cfg-if 1.0.0", "chrono", @@ -6032,7 +6580,7 @@ dependencies = [ "near-vm-runner", "nearcore", "node-runtime", - "num-rational", + "num-rational 0.3.2", "num-traits", "rand", "rand_xorshift", @@ -6120,7 +6668,7 @@ dependencies = [ "block_on_proc", "cfg-if 1.0.0", "hex", - "hmac", + "hmac 
0.12.1", "http", "log", "maybe-async", @@ -6131,7 +6679,7 @@ dependencies = [ "serde", "serde-xml-rs", "serde_derive", - "sha2", + "sha2 0.10.6", "thiserror", "time", "tokio", @@ -6218,9 +6766,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rxml" @@ -6256,6 +6804,30 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-info" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e" +dependencies = [ + "cfg-if 1.0.0", + "derive_more", + "parity-scale-codec", + "scale-info-derive", +] + +[[package]] +name = "scale-info-derive" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.103", +] + [[package]] name = "schannel" version = "0.1.19" @@ -6525,6 +7097,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha2" version = "0.10.6" @@ -6823,7 +7408,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "rustversion", @@ -6889,6 +7474,12 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "sysinfo" version = "0.24.5" @@ -7057,6 +7648,15 @@ dependencies = [ "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -7156,20 +7756,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.2" @@ -7210,18 +7796,28 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.0.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" -version = "0.6.2" +version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", - "base64 0.13.0", + "axum", + "base64 0.21.0", "bytes", - "futures-core", - "futures-util", "h2", "http", "http-body", @@ -7230,27 +7826,12 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "prost-derive", "tokio", "tokio-stream", - "tokio-util 0.6.10", "tower", "tower-layer", "tower-service", "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" -dependencies = [ - "proc-macro2", - "prost-build", - "quote", - "syn 1.0.103", ] [[package]] @@ -7267,7 +7848,7 @@ dependencies = [ "rand", "slab", "tokio", - "tokio-util 0.7.2", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -7275,9 +7856,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -7296,11 +7877,10 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.36" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if 1.0.0", "log", "pin-project-lite", "tracing-attributes", @@ -7309,57 +7889,37 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", + "thiserror", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.22" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 1.0.103", + "syn 2.0.32", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ 
-7373,16 +7933,20 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.17.4" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" +checksum = "a9be14ba1bbe4ab79e9229f7f89fab8d120b865859f10527f31c033e599d2284" dependencies = [ + "js-sys", "once_cell", "opentelemetry", + "opentelemetry_sdk", + "smallvec", "tracing", "tracing-core", - "tracing-log 0.1.3", + "tracing-log", "tracing-subscriber", + "web-time", ] [[package]] @@ -7410,7 +7974,17 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", +] + +[[package]] +name = "triehash" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" +dependencies = [ + "hash-db", + "rlp", ] [[package]] @@ -7467,9 +8041,9 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy", @@ -7507,12 +8081,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - [[package]] name = "unicode-width" version = "0.1.9" @@ -7542,6 +8110,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" @@ -7613,9 +8187,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7623,16 +8197,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn 1.0.103", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -7650,9 +8224,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7660,22 +8234,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 1.0.103", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-encoder" @@ -8195,6 +8769,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webrtc-util" version = "0.7.0" @@ -8567,29 +9151,28 @@ dependencies = [ [[package]] name = "zstd" -version = "0.12.3+zstd.1.5.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "6.0.3+zstd.1.5.2" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e4a3f57d13d0ab7e478665c60f35e2a613dcd527851c2c7287ce5c787e134a" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ - "libc", "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", - "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index d7abc7a388b..abebd1ad1e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,7 +113,7 @@ unnecessary_lazy_evaluations = "deny" [workspace.dependencies] actix = "0.13.0" actix-cors = "0.6.1" -actix-http = "3.3" +actix-http = "3.6" actix-rt = "2" actix-web = "4.1" anyhow = "1.0.62" @@ -121,6 +121,8 @@ arbitrary = { version = "1.2.3", features = ["derive"] } arc-swap = "1.5" assert_matches = "1.5.0" async-trait = "0.1.58" +aurora-engine-transactions = { git = "https://github.com/aurora-is-near/aurora-engine.git", tag = "3.6.1" } +aurora-engine-types = { git = "https://github.com/aurora-is-near/aurora-engine.git", tag = "3.6.1" } awc = { version = "3", features = ["openssl"] } backtrace = "0.3" base64 = "0.21" @@ -170,6 +172,7 @@ ed25519-dalek = { version = "2.1.0", default-features = false, features = [ elastic-array = "0.11" enum-map = "2.1.0" enumset = "1.0" +ethabi = "18" expect-test = "1.3.0" finite-wasm = "0.5.0" fs2 = "0.4" @@ -275,9 +278,10 @@ num-traits = "0.2.15" once_cell = "1.13.1" openssl = { version = "0.10.60", features = ["vendored"] } openssl-probe = "0.1.4" -opentelemetry = { version = "0.17.0", features = ["rt-tokio", "trace"] } -opentelemetry-otlp = "0.10.0" -opentelemetry-semantic-conventions = "0.9.0" +opentelemetry = { version = "0.22.0", features = ["trace"] } +opentelemetry_sdk = { version = "0.22.0", features = ["rt-tokio"] } +opentelemetry-otlp = "0.15.0" +opentelemetry-semantic-conventions = "0.14.0" paperclip = { 
version = "0.8.0", features = ["actix4"] } parity-wasm = { version = "0.42", default-features = false } parity-wasm_41 = { package = "parity-wasm", version = "0.41" } @@ -358,11 +362,11 @@ tokio-stream = { version = "0.1.2", features = ["net"] } tokio-util = { version = "0.7.1", features = ["codec", "io"] } toml = "0.5.8" tqdm = "0.4.4" -tracing = { version = "0.1.36", features = ["std"] } -tracing-appender = "0.2.2" -tracing-opentelemetry = "0.17.0" +tracing = { version = "0.1.40", features = ["std"] } +tracing-appender = "0.2.3" +tracing-opentelemetry = "0.23.0" tracing-span-tree = "0.1" -tracing-subscriber = { version = "0.3.15", features = [ +tracing-subscriber = { version = "0.3.18", features = [ "env-filter", "fmt", "registry", @@ -402,6 +406,7 @@ winapi = { version = "0.3", features = [ xshell = "0.2.1" xz2 = "0.1.6" yansi = "0.5.1" +zstd = "0.13.1" stdx = { package = "near-stdx", path = "utils/stdx" } diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index e8e7500a194..4d14e602ec6 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -521,7 +521,7 @@ impl Chain { }) .cloned() .collect(); - runtime_adapter.load_mem_tries_on_startup(&tracked_shards)?; + runtime_adapter.get_tries().load_mem_tries_for_enabled_shards(&tracked_shards)?; info!(target: "chain", "Init: header head @ #{} {}; block head @ #{} {}", header_head.height, header_head.last_block_hash, @@ -1744,6 +1744,7 @@ impl Chain { }); } + #[tracing::instrument(level = "debug", target = "chain", "postprocess_block_only", skip_all)] fn postprocess_block_only( &mut self, me: &Option, @@ -1751,9 +1752,18 @@ impl Chain { block_preprocess_info: BlockPreprocessInfo, apply_results: Vec<(ShardId, Result)>, ) -> Result, Error> { + // Save state transition data to the database only if it might later be needed + // for generating a state witness. Storage space optimization. + let should_save_state_transition_data = + self.should_produce_state_witness_for_this_or_next_epoch(me, block.header())?; let mut chain_update = self.chain_update(); - let new_head = - chain_update.postprocess_block(me, &block, block_preprocess_info, apply_results)?; + let new_head = chain_update.postprocess_block( + me, + &block, + block_preprocess_info, + apply_results, + should_save_state_transition_data, + )?; chain_update.commit()?; Ok(new_head) } @@ -2725,59 +2735,61 @@ impl Chain { Ok(()) } + pub fn schedule_load_memtrie( + &self, + shard_uid: ShardUId, + sync_hash: CryptoHash, + chunk: &ShardChunk, + load_memtrie_scheduler: &near_async::messaging::Sender, + ) { + load_memtrie_scheduler.send(LoadMemtrieRequest { + runtime_adapter: self.runtime_adapter.clone(), + shard_uid, + prev_state_root: chunk.prev_state_root(), + sync_hash, + }); + } + + pub fn create_flat_storage_for_shard( + &self, + shard_uid: ShardUId, + chunk: &ShardChunk, + ) -> Result<(), Error> { + let flat_storage_manager = self.runtime_adapter.get_flat_storage_manager(); + // Flat storage must not exist at this point because leftover keys corrupt its state. 
+        assert!(flat_storage_manager.get_flat_storage_for_shard(shard_uid).is_none());
+
+        let flat_head_hash = *chunk.prev_block();
+        let flat_head_header = self.get_block_header(&flat_head_hash)?;
+        let flat_head_prev_hash = *flat_head_header.prev_hash();
+        let flat_head_height = flat_head_header.height();
+
+        tracing::debug!(target: "store", ?shard_uid, ?flat_head_hash, flat_head_height, "set_state_finalize - initialized flat storage");
+
+        let mut store_update = self.runtime_adapter.store().store_update();
+        store_helper::set_flat_storage_status(
+            &mut store_update,
+            shard_uid,
+            FlatStorageStatus::Ready(FlatStorageReadyStatus {
+                flat_head: near_store::flat::BlockInfo {
+                    hash: flat_head_hash,
+                    prev_hash: flat_head_prev_hash,
+                    height: flat_head_height,
+                },
+            }),
+        );
+        store_update.commit()?;
+        flat_storage_manager.create_flat_storage_for_shard(shard_uid).unwrap();
+        Ok(())
+    }
+
     pub fn set_state_finalize(
         &mut self,
         shard_id: ShardId,
         sync_hash: CryptoHash,
-        apply_result: Result<(), near_chain_primitives::Error>,
     ) -> Result<(), Error> {
         let _span = tracing::debug_span!(target: "sync", "set_state_finalize").entered();
-        apply_result?;
-
         let shard_state_header = self.get_state_header(shard_id, sync_hash)?;
-        let chunk = shard_state_header.cloned_chunk();
-
-        let block_hash = chunk.prev_block();
-
-        // We synced shard state on top of _previous_ block for chunk in shard state header and applied state parts to
-        // flat storage. Now we can set flat head to hash of this block and create flat storage.
-        // If block_hash is equal to default - this means that we're all the way back at genesis.
-        // So we don't have to add the storage state for shard in such case.
-        // TODO(8438) - add additional test scenarios for this case.
-        if *block_hash != CryptoHash::default() {
-            let block_header = self.get_block_header(block_hash)?;
-            let epoch_id = block_header.epoch_id();
-            let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?;
-
-            let flat_storage_manager = self.runtime_adapter.get_flat_storage_manager();
-            // Flat storage must not exist at this point because leftover keys corrupt its state.
-            assert!(flat_storage_manager.get_flat_storage_for_shard(shard_uid).is_none());
-
-            let flat_head_hash = *chunk.prev_block();
-            let flat_head_header = self.get_block_header(&flat_head_hash)?;
-            let flat_head_prev_hash = *flat_head_header.prev_hash();
-            let flat_head_height = flat_head_header.height();
-
-            tracing::debug!(target: "store", ?shard_uid, ?flat_head_hash, flat_head_height, "set_state_finalize - initialized flat storage");
-
-            let mut store_update = self.runtime_adapter.store().store_update();
-            store_helper::set_flat_storage_status(
-                &mut store_update,
-                shard_uid,
-                FlatStorageStatus::Ready(FlatStorageReadyStatus {
-                    flat_head: near_store::flat::BlockInfo {
-                        hash: flat_head_hash,
-                        prev_hash: flat_head_prev_hash,
-                        height: flat_head_height,
-                    },
-                }),
-            );
-            store_update.commit()?;
-            flat_storage_manager.create_flat_storage_for_shard(shard_uid).unwrap();
-            // Flat storage is ready, load memtrie if it is enabled.
- self.runtime_adapter.load_mem_trie_on_catchup(&shard_uid, &chunk.prev_state_root())?; - } - let mut height = shard_state_header.chunk_height_included(); let mut chain_update = self.chain_update(); chain_update.set_state_finalize(shard_id, sync_hash, shard_state_header)?; @@ -2969,9 +2981,17 @@ impl Chain { results: Vec>, ) -> Result<(), Error> { let block = self.chain_store.get_block(block_hash)?; + // Save state transition data to the database only if it might later be needed + // for generating a state witness. Storage space optimization. + let should_save_state_transition_data = + self.should_produce_state_witness_for_this_or_next_epoch(me, block.header())?; let mut chain_update = self.chain_update(); let results = results.into_iter().collect::, Error>>()?; - chain_update.apply_chunk_postprocessing(&block, results)?; + chain_update.apply_chunk_postprocessing( + &block, + results, + should_save_state_transition_data, + )?; chain_update.commit()?; let epoch_id = block.header().epoch_id(); @@ -3060,29 +3080,18 @@ impl Chain { Ok(self.chain_store.get_outcomes_by_id(id)?.into_iter().map(Into::into).collect()) } - fn get_recursive_transaction_results( - &self, - outcomes: &mut Vec, - id: &CryptoHash, - ) -> Result<(), Error> { - outcomes.push(ExecutionOutcomeWithIdView::from(self.get_execution_outcome(id)?)); - let outcome_idx = outcomes.len() - 1; - for idx in 0..outcomes[outcome_idx].outcome.receipt_ids.len() { - let id = outcomes[outcome_idx].outcome.receipt_ids[idx]; - self.get_recursive_transaction_results(outcomes, &id)?; - } - Ok(()) - } - - pub fn get_final_transaction_result( + /// Returns execution status based on the list of currently existing outcomes + fn get_execution_status( &self, + outcomes: &[ExecutionOutcomeWithIdView], transaction_hash: &CryptoHash, - ) -> Result { - let mut outcomes = Vec::new(); - self.get_recursive_transaction_results(&mut outcomes, transaction_hash)?; + ) -> FinalExecutionStatus { + if outcomes.is_empty() { + return FinalExecutionStatus::NotStarted; + } let mut looking_for_id = *transaction_hash; let num_outcomes = outcomes.len(); - let status = outcomes + outcomes .iter() .find_map(|outcome_with_id| { if outcome_with_id.id == looking_for_id { @@ -3106,7 +3115,39 @@ impl Chain { None } }) - .expect("results should resolve to a final outcome"); + .unwrap_or_else(|| FinalExecutionStatus::Started) + } + + /// Collect all the execution outcomes existing at the current moment + /// Fails if there are non executed receipts, and require_all_outcomes == true + fn get_recursive_transaction_results( + &self, + outcomes: &mut Vec, + id: &CryptoHash, + require_all_outcomes: bool, + ) -> Result<(), Error> { + let outcome = match self.get_execution_outcome(id) { + Ok(outcome) => outcome, + Err(err) => return if require_all_outcomes { Err(err) } else { Ok(()) }, + }; + outcomes.push(ExecutionOutcomeWithIdView::from(outcome)); + let outcome_idx = outcomes.len() - 1; + for idx in 0..outcomes[outcome_idx].outcome.receipt_ids.len() { + let id = outcomes[outcome_idx].outcome.receipt_ids[idx]; + self.get_recursive_transaction_results(outcomes, &id, require_all_outcomes)?; + } + Ok(()) + } + + /// Returns FinalExecutionOutcomeView for the given transaction. 
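+ /// The returned view bundles the final status with the transaction outcome
+ /// and the outcomes of all receipts it spawned.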
+ /// Waits for the end of the execution of all corresponding receipts + pub fn get_final_transaction_result( + &self, + transaction_hash: &CryptoHash, + ) -> Result { + let mut outcomes = Vec::new(); + self.get_recursive_transaction_results(&mut outcomes, transaction_hash, true)?; + let status = self.get_execution_status(&outcomes, transaction_hash); let receipts_outcome = outcomes.split_off(1); let transaction = self.chain_store.get_transaction(transaction_hash)?.ok_or_else(|| { Error::DBNotFoundErr(format!("Transaction {} is not found", transaction_hash)) @@ -3116,16 +3157,45 @@ impl Chain { Ok(FinalExecutionOutcomeView { status, transaction, transaction_outcome, receipts_outcome }) } - pub fn get_final_transaction_result_with_receipt( + /// Returns FinalExecutionOutcomeView for the given transaction. + /// Does not wait for the end of the execution of all corresponding receipts + pub fn get_partial_transaction_result( &self, - final_outcome: FinalExecutionOutcomeView, + transaction_hash: &CryptoHash, + ) -> Result { + let transaction = self.chain_store.get_transaction(transaction_hash)?.ok_or_else(|| { + Error::DBNotFoundErr(format!("Transaction {} is not found", transaction_hash)) + })?; + let transaction: SignedTransactionView = SignedTransaction::clone(&transaction).into(); + + let mut outcomes = Vec::new(); + self.get_recursive_transaction_results(&mut outcomes, transaction_hash, false)?; + if outcomes.is_empty() { + // It can't be, we would fail with tx not found error earlier in this case + // But if so, let's return meaningful error instead of panic on split_off + return Err(Error::DBNotFoundErr(format!( + "Transaction {} is not found", + transaction_hash + ))); + } + + let status = self.get_execution_status(&outcomes, transaction_hash); + let receipts_outcome = outcomes.split_off(1); + let transaction_outcome = outcomes.pop().unwrap(); + Ok(FinalExecutionOutcomeView { status, transaction, transaction_outcome, receipts_outcome }) + } + + /// Returns corresponding receipts for provided outcome + /// The incoming list in receipts_outcome may be partial + pub fn get_transaction_result_with_receipt( + &self, + outcome: FinalExecutionOutcomeView, ) -> Result { let receipt_id_from_transaction = - final_outcome.transaction_outcome.outcome.receipt_ids.get(0).cloned(); - let is_local_receipt = - final_outcome.transaction.signer_id == final_outcome.transaction.receiver_id; + outcome.transaction_outcome.outcome.receipt_ids.get(0).cloned(); + let is_local_receipt = outcome.transaction.signer_id == outcome.transaction.receiver_id; - let receipts = final_outcome + let receipts = outcome .receipts_outcome .iter() .filter_map(|outcome| { @@ -3141,7 +3211,7 @@ impl Chain { }) .collect::, _>>()?; - Ok(FinalExecutionOutcomeWithReceiptView { final_outcome, receipts }) + Ok(FinalExecutionOutcomeWithReceiptView { final_outcome: outcome, receipts }) } pub fn check_blocks_final_and_canonical( @@ -3338,12 +3408,8 @@ impl Chain { // only for a single shard. This so far has been enough. let state_patch = state_patch.take(); - let storage_context = StorageContext { - storage_data_source: StorageDataSource::Db, - state_patch, - record_storage: self - .should_produce_state_witness_for_this_or_next_epoch(me, block.header())?, - }; + let storage_context = + StorageContext { storage_data_source: StorageDataSource::Db, state_patch }; let stateful_job = self.get_update_shard_job( me, block, @@ -4164,7 +4230,8 @@ impl Chain { Ok(is_first_block_of_epoch?) 
} - /// Get transaction result for given hash of transaction or receipt id on the canonical chain + /// Get transaction result for given hash of transaction or receipt id + /// Chain may not be canonical yet pub fn get_execution_outcome( &self, id: &CryptoHash, @@ -4463,6 +4530,42 @@ pub struct ApplyStatePartsResponse { pub sync_hash: CryptoHash, } +// This message is handled by `sync_job_actions.rs::handle_load_memtrie_request()`. +// It is a request for `runtime_adapter` to load in-memory trie for `shard_uid`. +#[derive(actix::Message)] +#[rtype(result = "()")] +pub struct LoadMemtrieRequest { + pub runtime_adapter: Arc, + pub shard_uid: ShardUId, + // Required to load memtrie. + pub prev_state_root: StateRoot, + // Needs to be included in a response to the caller for identification purposes. + pub sync_hash: CryptoHash, +} + +// Skip `runtime_adapter`, because it's a complex object that has complex logic +// and many fields. +impl Debug for LoadMemtrieRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LoadMemtrieRequest") + .field("runtime_adapter", &"") + .field("shard_uid", &self.shard_uid) + .field("prev_state_root", &self.prev_state_root) + .field("sync_hash", &self.sync_hash) + .finish() + } +} + +// It is message indicating the result of loading in-memory trie for `shard_id`. +// `sync_hash` is passed around to indicate to which block we were catching up. +#[derive(actix::Message, Debug)] +#[rtype(result = "()")] +pub struct LoadMemtrieResponse { + pub load_result: Result<(), near_chain_primitives::error::Error>, + pub shard_uid: ShardUId, + pub sync_hash: CryptoHash, +} + #[derive(actix::Message)] #[rtype(result = "()")] pub struct BlockCatchUpRequest { diff --git a/chain/chain/src/chain_update.rs b/chain/chain/src/chain_update.rs index a75691f371a..b96e828c714 100644 --- a/chain/chain/src/chain_update.rs +++ b/chain/chain/src/chain_update.rs @@ -145,10 +145,11 @@ impl<'a> ChainUpdate<'a> { &mut self, block: &Block, apply_results: Vec, + should_save_state_transition_data: bool, ) -> Result<(), Error> { let _span = tracing::debug_span!(target: "chain", "apply_chunk_postprocessing").entered(); for result in apply_results { - self.process_apply_chunk_result(block, result)?; + self.process_apply_chunk_result(block, result, should_save_state_transition_data)?; } Ok(()) } @@ -299,6 +300,7 @@ impl<'a> ChainUpdate<'a> { &mut self, block: &Block, result: ShardUpdateResult, + should_save_state_transition_data: bool, ) -> Result<(), Error> { let block_hash = block.hash(); let prev_hash = block.header().prev_hash(); @@ -351,12 +353,14 @@ impl<'a> ChainUpdate<'a> { apply_result.outcomes, outcome_paths, ); - self.chain_store_update.save_state_transition_data( - *block_hash, - shard_id, - apply_result.proof, - apply_result.applied_receipts_hash, - ); + if should_save_state_transition_data { + self.chain_store_update.save_state_transition_data( + *block_hash, + shard_id, + apply_result.proof, + apply_result.applied_receipts_hash, + ); + } if let Some(resharding_results) = resharding_results { self.process_resharding_results(block, &shard_uid, resharding_results)?; } @@ -383,12 +387,14 @@ impl<'a> ChainUpdate<'a> { self.chain_store_update.save_chunk_extra(block_hash, &shard_uid, new_extra); self.chain_store_update.save_trie_changes(apply_result.trie_changes); - self.chain_store_update.save_state_transition_data( - *block_hash, - shard_uid.shard_id(), - apply_result.proof, - apply_result.applied_receipts_hash, - ); + if should_save_state_transition_data { + 
self.chain_store_update.save_state_transition_data( + *block_hash, + shard_uid.shard_id(), + apply_result.proof, + apply_result.applied_receipts_hash, + ); + } if let Some(resharding_config) = resharding_results { self.process_resharding_results(block, &shard_uid, resharding_config)?; @@ -407,12 +413,19 @@ impl<'a> ChainUpdate<'a> { /// This is the last step of process_block_single, where we take the preprocess block info /// apply chunk results and store the results on chain. + #[tracing::instrument( + level = "debug", + target = "chain", + "ChainUpdate::postprocess_block", + skip_all + )] pub(crate) fn postprocess_block( &mut self, me: &Option, block: &Block, block_preprocess_info: BlockPreprocessInfo, apply_chunks_results: Vec<(ShardId, Result)>, + should_save_state_transition_data: bool, ) -> Result, Error> { let shard_ids = self.epoch_manager.shard_ids(block.header().epoch_id())?; let prev_hash = block.header().prev_hash(); @@ -422,7 +435,7 @@ impl<'a> ChainUpdate<'a> { } x }).collect::, Error>>()?; - self.apply_chunk_postprocessing(block, results)?; + self.apply_chunk_postprocessing(block, results, should_save_state_transition_data)?; let BlockPreprocessInfo { is_caught_up, diff --git a/chain/chain/src/resharding.rs b/chain/chain/src/resharding.rs index 566ceb85a02..542ef5697b3 100644 --- a/chain/chain/src/resharding.rs +++ b/chain/chain/src/resharding.rs @@ -22,7 +22,7 @@ use near_store::flat::{ store_helper, BlockInfo, FlatStorageError, FlatStorageManager, FlatStorageReadyStatus, FlatStorageStatus, }; -use near_store::resharding::get_delayed_receipts; +use near_store::resharding::{get_delayed_receipts, get_promise_yield_timeouts}; use near_store::trie::SnapshotError; use near_store::{ShardTries, ShardUId, StorageError, Store, Trie, TrieDBStorage, TrieStorage}; use std::collections::{HashMap, HashSet}; @@ -168,6 +168,38 @@ fn apply_delayed_receipts<'a>( Ok(new_state_roots) } +fn apply_promise_yield_timeouts<'a>( + config: &ReshardingConfig, + tries: &ShardTries, + orig_shard_uid: ShardUId, + orig_state_root: StateRoot, + state_roots: HashMap, + account_id_to_shard_uid: &(dyn Fn(&AccountId) -> ShardUId + 'a), +) -> Result, Error> { + let mut total_count = 0; + let orig_trie_update = tries.new_trie_update_view(orig_shard_uid, orig_state_root); + + let mut start_index = None; + let mut new_state_roots = state_roots; + while let Some((next_index, timeouts)) = + get_promise_yield_timeouts(&orig_trie_update, start_index, config.batch_size)? 
+ { + total_count += timeouts.len() as u64; + let (store_update, updated_state_roots) = tries + .apply_promise_yield_timeouts_to_children_states( + &new_state_roots, + &timeouts, + account_id_to_shard_uid, + )?; + new_state_roots = updated_state_roots; + start_index = Some(next_index); + store_update.commit()?; + } + + tracing::debug!(target: "resharding", ?orig_shard_uid, ?total_count, "Applied PromiseYield timeouts"); + Ok(new_state_roots) +} + // function to set up flat storage status to Ready after a resharding event // TODO(resharding) : Consolidate this with setting up flat storage during state sync logic fn set_flat_storage_state( @@ -419,6 +451,15 @@ impl Chain { &checked_account_id_to_shard_uid, )?; + state_roots = apply_promise_yield_timeouts( + &config.get(), + &tries, + shard_uid, + state_root, + state_roots, + &checked_account_id_to_shard_uid, + )?; + tracing::debug!(target: "resharding", ?shard_uid, "build_state_for_split_shards_impl finished"); Ok(state_roots) } diff --git a/chain/chain/src/runtime/mod.rs b/chain/chain/src/runtime/mod.rs index a41d0962ba1..401a462ba93 100644 --- a/chain/chain/src/runtime/mod.rs +++ b/chain/chain/src/runtime/mod.rs @@ -15,6 +15,7 @@ use near_epoch_manager::{EpochManagerAdapter, EpochManagerHandle}; use near_parameters::{ActionCosts, ExtCosts, RuntimeConfigStore}; use near_pool::types::TransactionGroupIterator; use near_primitives::account::{AccessKey, Account}; +use near_primitives::checked_feature; use near_primitives::errors::{InvalidTxError, RuntimeError, StorageError}; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::{DelayedReceiptIndices, Receipt}; @@ -418,7 +419,7 @@ impl NightshadeRuntime { metrics::DELAYED_RECEIPTS_COUNT .with_label_values(&[&shard_label]) .set(apply_result.delayed_receipts_count as i64); - if let Some(metrics) = apply_result.metrics { + if let Some(mut metrics) = apply_result.metrics { metrics.report(&shard_label); } @@ -450,6 +451,7 @@ impl NightshadeRuntime { total_balance_burnt, proof: apply_result.proof, processed_delayed_receipts: apply_result.processed_delayed_receipts, + processed_yield_timeouts: apply_result.processed_yield_timeouts, applied_receipts_hash: hash(&borsh::to_vec(receipts).unwrap()), }; @@ -691,8 +693,15 @@ impl RuntimeAdapter for NightshadeRuntime { ) -> Result { let start_time = std::time::Instant::now(); let PrepareTransactionsChunkContext { shard_id, gas_limit } = chunk; + let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(&prev_block.block_hash)?; let protocol_version = self.epoch_manager.get_epoch_protocol_version(&epoch_id)?; + + let next_epoch_id = + self.epoch_manager.get_next_epoch_id_from_prev_block(&(&prev_block.block_hash))?; + let next_protocol_version = + self.epoch_manager.get_epoch_protocol_version(&next_epoch_id)?; + let shard_uid = self.get_shard_uid_from_epoch_id(shard_id, &epoch_id)?; // While the height of the next block that includes the chunk might not be prev_height + 1, // using it will result in a more conservative check and will not accidentally allow @@ -709,7 +718,13 @@ impl RuntimeAdapter for NightshadeRuntime { storage_config.use_flat_storage, ), }; - if storage_config.record_storage { + // We need to start recording reads if the stateless validation is + // enabled in the next epoch. We need to save the state transition data + // in the current epoch to be able to produce the state witness in the + // next epoch. 
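+ // Checking the next epoch's protocol version (rather than the current
+ // one) means recording starts one epoch early, so witnesses for the
+ // first blocks of the new epoch can still be produced.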
+ if checked_feature!("stable", StateWitnessSizeLimit, next_protocol_version) + || cfg!(feature = "shadow_chunk_validation") + { trie = trie.recording_reads(); } let mut state_update = TrieUpdate::new(trie); @@ -871,7 +886,18 @@ impl RuntimeAdapter for NightshadeRuntime { storage_config.use_flat_storage, ), }; - if storage_config.record_storage { + let next_epoch_id = + self.epoch_manager.get_next_epoch_id_from_prev_block(&block.prev_block_hash)?; + let next_protocol_version = + self.epoch_manager.get_epoch_protocol_version(&next_epoch_id)?; + + // We need to start recording reads if the stateless validation is + // enabled in the next epoch. We need to save the state transition data + // in the current epoch to be able to produce the state witness in the + // next epoch. + if checked_feature!("stable", StateWitnessSizeLimit, next_protocol_version) + || cfg!(feature = "shadow_chunk_validation") + { trie = trie.recording_reads(); } @@ -1237,32 +1263,6 @@ impl RuntimeAdapter for NightshadeRuntime { let epoch_manager = self.epoch_manager.read(); Ok(epoch_manager.will_shard_layout_change(parent_hash)?) } - - fn load_mem_tries_on_startup(&self, tracked_shards: &[ShardUId]) -> Result<(), StorageError> { - self.tries.load_mem_tries_for_enabled_shards(tracked_shards) - } - - fn load_mem_trie_on_catchup( - &self, - shard_uid: &ShardUId, - state_root: &StateRoot, - ) -> Result<(), StorageError> { - if !self.get_tries().trie_config().load_mem_tries_for_tracked_shards { - return Ok(()); - } - // It should not happen that memtrie is already loaded for a shard - // for which we just did state sync. - debug_assert!(!self.tries.is_mem_trie_loaded(shard_uid)); - self.tries.load_mem_trie(shard_uid, Some(*state_root)) - } - - fn retain_mem_tries(&self, shard_uids: &[ShardUId]) { - self.tries.retain_mem_tries(shard_uids) - } - - fn unload_mem_trie(&self, shard_uid: &ShardUId) { - self.tries.unload_mem_trie(shard_uid) - } } impl node_runtime::adapter::ViewRuntimeAdapter for NightshadeRuntime { diff --git a/chain/chain/src/runtime/tests.rs b/chain/chain/src/runtime/tests.rs index 8730790aaab..79ef34ffb52 100644 --- a/chain/chain/src/runtime/tests.rs +++ b/chain/chain/src/runtime/tests.rs @@ -9,8 +9,10 @@ use near_epoch_manager::{EpochManager, RngSeed}; use near_pool::{ InsertTransactionResult, PoolIteratorWrapper, TransactionGroupIteratorWrapper, TransactionPool, }; +use near_primitives::checked_feature; use near_primitives::test_utils::create_test_signer; use near_primitives::types::validator_stake::{ValidatorStake, ValidatorStakeIter}; +use near_primitives::version::PROTOCOL_VERSION; use near_store::flat::{FlatStateChanges, FlatStateDelta, FlatStateDeltaMetadata}; use near_store::genesis::initialize_genesis_state; use num_rational::Ratio; @@ -1602,6 +1604,11 @@ fn prepare_transactions( /// Check that transactions validation works the same when using recorded storage proof instead of db. 
#[test] fn test_prepare_transactions_storage_proof() { + if !checked_feature!("stable", StatelessValidationV0, PROTOCOL_VERSION) { + println!("Test not applicable without StatelessValidation enabled"); + return; + } + let (env, chain, mut transaction_pool) = get_test_env_with_chain_and_pool(); let transactions_count = transaction_pool.len(); @@ -1610,7 +1617,6 @@ fn test_prepare_transactions_storage_proof() { use_flat_storage: true, source: StorageDataSource::Db, state_patch: Default::default(), - record_storage: true, }; let proposed_transactions = prepare_transactions( @@ -1631,7 +1637,6 @@ fn test_prepare_transactions_storage_proof() { nodes: proposed_transactions.storage_proof.unwrap(), }), state_patch: Default::default(), - record_storage: false, }; let validated_transactions = prepare_transactions( @@ -1648,6 +1653,11 @@ fn test_prepare_transactions_storage_proof() { /// Check that transactions validation fails if provided empty storage proof. #[test] fn test_prepare_transactions_empty_storage_proof() { + if !checked_feature!("stable", StatelessValidationV0, PROTOCOL_VERSION) { + println!("Test not applicable without StatelessValidation enabled"); + return; + } + let (env, chain, mut transaction_pool) = get_test_env_with_chain_and_pool(); let transactions_count = transaction_pool.len(); @@ -1656,7 +1666,6 @@ fn test_prepare_transactions_empty_storage_proof() { use_flat_storage: true, source: StorageDataSource::Db, state_patch: Default::default(), - record_storage: true, }; let proposed_transactions = prepare_transactions( @@ -1677,7 +1686,6 @@ fn test_prepare_transactions_empty_storage_proof() { nodes: PartialState::default(), // We use empty storage proof here. }), state_patch: Default::default(), - record_storage: false, }; let validation_result = prepare_transactions( diff --git a/chain/chain/src/state_snapshot_actor.rs b/chain/chain/src/state_snapshot_actor.rs index eda53784f62..7aeb8c5603e 100644 --- a/chain/chain/src/state_snapshot_actor.rs +++ b/chain/chain/src/state_snapshot_actor.rs @@ -1,4 +1,4 @@ -use actix::{AsyncContext, Context}; +use actix::{Actor, Addr, Arbiter, ArbiterHandle, AsyncContext, Context}; use near_async::messaging::CanSend; use near_network::types::{NetworkRequests, PeerManagerAdapter, PeerManagerMessageRequest}; use near_o11y::{handler_debug_span, WithSpanContext, WithSpanContextExt}; @@ -22,12 +22,18 @@ pub struct StateSnapshotActor { } impl StateSnapshotActor { - pub fn new( + pub fn spawn( flat_storage_manager: FlatStorageManager, network_adapter: PeerManagerAdapter, tries: ShardTries, - ) -> Self { - Self { flat_storage_manager, network_adapter, tries } + ) -> (Addr, ArbiterHandle) { + let arbiter = Arbiter::new().handle(); + let addr = Self::start_in_arbiter(&arbiter, |_ctx| Self { + flat_storage_manager, + network_adapter, + tries, + }); + (addr, arbiter) } } @@ -134,7 +140,7 @@ pub struct SnapshotCallbacks { /// Sends a request to make a state snapshot. pub fn get_make_snapshot_callback( - state_snapshot_addr: Arc>, + state_snapshot_addr: actix::Addr, flat_storage_manager: FlatStorageManager, ) -> MakeSnapshotCallback { Arc::new(move |prev_block_hash, epoch_height, shard_uids, block| { @@ -162,7 +168,7 @@ pub fn get_make_snapshot_callback( /// Sends a request to delete a state snapshot. 
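+ /// The returned callback only sends the request; the deletion itself is
+ /// handled asynchronously by the state snapshot actor.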
pub fn get_delete_snapshot_callback( - state_snapshot_addr: Arc>, + state_snapshot_addr: actix::Addr, ) -> DeleteSnapshotCallback { Arc::new(move || { tracing::info!( diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index a19600f35b5..e80a6c16dbc 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -2333,124 +2333,149 @@ impl<'a> ChainStoreUpdate<'a> { Ok(chain_store_update) } + #[tracing::instrument(level = "debug", target = "store", "ChainUpdate::finalize", skip_all)] fn finalize(&mut self) -> Result { let mut store_update = self.store().store_update(); - Self::write_col_misc(&mut store_update, HEAD_KEY, &mut self.head)?; - Self::write_col_misc(&mut store_update, TAIL_KEY, &mut self.tail)?; - Self::write_col_misc(&mut store_update, CHUNK_TAIL_KEY, &mut self.chunk_tail)?; - Self::write_col_misc(&mut store_update, FORK_TAIL_KEY, &mut self.fork_tail)?; - Self::write_col_misc(&mut store_update, HEADER_HEAD_KEY, &mut self.header_head)?; - Self::write_col_misc(&mut store_update, FINAL_HEAD_KEY, &mut self.final_head)?; - Self::write_col_misc( - &mut store_update, - LARGEST_TARGET_HEIGHT_KEY, - &mut self.largest_target_height, - )?; - debug_assert!(self.chain_store_cache_update.blocks.len() <= 1); - for (hash, block) in self.chain_store_cache_update.blocks.iter() { - let mut map = HashMap::clone( - self.chain_store.get_all_block_hashes_by_height(block.header().height())?.as_ref(), - ); - map.entry(block.header().epoch_id().clone()) - .or_insert_with(|| HashSet::new()) - .insert(*hash); - store_update.set_ser( - DBCol::BlockPerHeight, - &index_to_bytes(block.header().height()), - &map, + { + let _span = tracing::trace_span!(target: "store", "write_col_misc").entered(); + Self::write_col_misc(&mut store_update, HEAD_KEY, &mut self.head)?; + Self::write_col_misc(&mut store_update, TAIL_KEY, &mut self.tail)?; + Self::write_col_misc(&mut store_update, CHUNK_TAIL_KEY, &mut self.chunk_tail)?; + Self::write_col_misc(&mut store_update, FORK_TAIL_KEY, &mut self.fork_tail)?; + Self::write_col_misc(&mut store_update, HEADER_HEAD_KEY, &mut self.header_head)?; + Self::write_col_misc(&mut store_update, FINAL_HEAD_KEY, &mut self.final_head)?; + Self::write_col_misc( + &mut store_update, + LARGEST_TARGET_HEIGHT_KEY, + &mut self.largest_target_height, )?; - self.chain_store_cache_update - .block_hash_per_height - .insert(block.header().height(), map); - store_update.insert_ser(DBCol::Block, hash.as_ref(), block)?; } - let mut header_hashes_by_height: HashMap> = HashMap::new(); - for (hash, header) in self.chain_store_cache_update.headers.iter() { - if self.chain_store.get_block_header(hash).is_ok() { - // No need to add same Header once again - continue; + { + let _span = tracing::trace_span!(target: "store", "write_block").entered(); + debug_assert!(self.chain_store_cache_update.blocks.len() <= 1); + for (hash, block) in self.chain_store_cache_update.blocks.iter() { + let mut map = HashMap::clone( + self.chain_store + .get_all_block_hashes_by_height(block.header().height())? 
+ .as_ref(), + ); + map.entry(block.header().epoch_id().clone()) + .or_insert_with(|| HashSet::new()) + .insert(*hash); + store_update.set_ser( + DBCol::BlockPerHeight, + &index_to_bytes(block.header().height()), + &map, + )?; + self.chain_store_cache_update + .block_hash_per_height + .insert(block.header().height(), map); + store_update.insert_ser(DBCol::Block, hash.as_ref(), block)?; } + let mut header_hashes_by_height: HashMap> = + HashMap::new(); + for (hash, header) in self.chain_store_cache_update.headers.iter() { + if self.chain_store.get_block_header(hash).is_ok() { + // No need to add same Header once again + continue; + } - header_hashes_by_height - .entry(header.height()) - .or_insert_with(|| { - self.chain_store - .get_all_header_hashes_by_height(header.height()) - .unwrap_or_default() - }) - .insert(*hash); - store_update.insert_ser(DBCol::BlockHeader, hash.as_ref(), header)?; - } - for (height, hash_set) in header_hashes_by_height { - store_update.set_ser( - DBCol::HeaderHashesByHeight, - &index_to_bytes(height), - &hash_set, - )?; + header_hashes_by_height + .entry(header.height()) + .or_insert_with(|| { + self.chain_store + .get_all_header_hashes_by_height(header.height()) + .unwrap_or_default() + }) + .insert(*hash); + store_update.insert_ser(DBCol::BlockHeader, hash.as_ref(), header)?; + } + for (height, hash_set) in header_hashes_by_height { + store_update.set_ser( + DBCol::HeaderHashesByHeight, + &index_to_bytes(height), + &hash_set, + )?; + } + for ((block_hash, shard_uid), chunk_extra) in + self.chain_store_cache_update.chunk_extras.iter() + { + store_update.set_ser( + DBCol::ChunkExtra, + &get_block_shard_uid(block_hash, shard_uid), + chunk_extra, + )?; + } + for (block_hash, block_extra) in self.chain_store_cache_update.block_extras.iter() { + store_update.insert_ser(DBCol::BlockExtra, block_hash.as_ref(), block_extra)?; + } } - for ((block_hash, shard_uid), chunk_extra) in - self.chain_store_cache_update.chunk_extras.iter() + { - store_update.set_ser( - DBCol::ChunkExtra, - &get_block_shard_uid(block_hash, shard_uid), - chunk_extra, - )?; - } - for (block_hash, block_extra) in self.chain_store_cache_update.block_extras.iter() { - store_update.insert_ser(DBCol::BlockExtra, block_hash.as_ref(), block_extra)?; - } - let mut chunk_hashes_by_height: HashMap> = HashMap::new(); - for (chunk_hash, chunk) in self.chain_store_cache_update.chunks.iter() { - if self.chain_store.chunk_exists(chunk_hash)? { - // No need to add same Chunk once again - continue; - } + let _span = tracing::trace_span!(target: "store", "write_chunk").entered(); + + let mut chunk_hashes_by_height: HashMap> = + HashMap::new(); + for (chunk_hash, chunk) in self.chain_store_cache_update.chunks.iter() { + if self.chain_store.chunk_exists(chunk_hash)? 
{ + // No need to add same Chunk once again + continue; + } - let height_created = chunk.height_created(); - match chunk_hashes_by_height.entry(height_created) { - Entry::Occupied(mut entry) => { - entry.get_mut().insert(chunk_hash.clone()); + let height_created = chunk.height_created(); + match chunk_hashes_by_height.entry(height_created) { + Entry::Occupied(mut entry) => { + entry.get_mut().insert(chunk_hash.clone()); + } + Entry::Vacant(entry) => { + let mut hash_set = + match self.chain_store.get_all_chunk_hashes_by_height(height_created) { + Ok(hash_set) => hash_set.clone(), + Err(_) => HashSet::new(), + }; + hash_set.insert(chunk_hash.clone()); + entry.insert(hash_set); + } + }; + + // Increase transaction refcounts for all included txs + for tx in chunk.transactions().iter() { + let bytes = borsh::to_vec(&tx).expect("Borsh cannot fail"); + store_update.increment_refcount( + DBCol::Transactions, + tx.get_hash().as_ref(), + &bytes, + ); } - Entry::Vacant(entry) => { - let mut hash_set = - match self.chain_store.get_all_chunk_hashes_by_height(height_created) { - Ok(hash_set) => hash_set.clone(), - Err(_) => HashSet::new(), - }; - hash_set.insert(chunk_hash.clone()); - entry.insert(hash_set); + + // Increase receipt refcounts for all included receipts + for receipt in chunk.prev_outgoing_receipts().iter() { + let bytes = borsh::to_vec(&receipt).expect("Borsh cannot fail"); + store_update.increment_refcount( + DBCol::Receipts, + receipt.get_hash().as_ref(), + &bytes, + ); } - }; - // Increase transaction refcounts for all included txs - for tx in chunk.transactions().iter() { - let bytes = borsh::to_vec(&tx).expect("Borsh cannot fail"); - store_update.increment_refcount( - DBCol::Transactions, - tx.get_hash().as_ref(), - &bytes, - ); + store_update.insert_ser(DBCol::Chunks, chunk_hash.as_ref(), chunk)?; } - - // Increase receipt refcounts for all included receipts - for receipt in chunk.prev_outgoing_receipts().iter() { - let bytes = borsh::to_vec(&receipt).expect("Borsh cannot fail"); - store_update.increment_refcount( - DBCol::Receipts, - receipt.get_hash().as_ref(), - &bytes, - ); + for (height, hash_set) in chunk_hashes_by_height { + store_update.set_ser( + DBCol::ChunkHashesByHeight, + &index_to_bytes(height), + &hash_set, + )?; + } + for (chunk_hash, partial_chunk) in self.chain_store_cache_update.partial_chunks.iter() { + store_update.insert_ser( + DBCol::PartialChunks, + chunk_hash.as_ref(), + partial_chunk, + )?; } - - store_update.insert_ser(DBCol::Chunks, chunk_hash.as_ref(), chunk)?; - } - for (height, hash_set) in chunk_hashes_by_height { - store_update.set_ser(DBCol::ChunkHashesByHeight, &index_to_bytes(height), &hash_set)?; - } - for (chunk_hash, partial_chunk) in self.chain_store_cache_update.partial_chunks.iter() { - store_update.insert_ser(DBCol::PartialChunks, chunk_hash.as_ref(), partial_chunk)?; } + for (height, hash) in self.chain_store_cache_update.height_to_hashes.iter() { if let Some(hash) = hash { store_update.set_ser(DBCol::BlockHeight, &index_to_bytes(*height), hash)?; @@ -2470,40 +2495,52 @@ impl<'a> ChainStoreUpdate<'a> { light_client_block, )?; } - for ((block_hash, shard_id), receipt) in - self.chain_store_cache_update.outgoing_receipts.iter() - { - store_update.set_ser( - DBCol::OutgoingReceipts, - &get_block_shard_id(block_hash, *shard_id), - receipt, - )?; - } - for ((block_hash, shard_id), receipt) in - self.chain_store_cache_update.incoming_receipts.iter() { - store_update.set_ser( - DBCol::IncomingReceipts, - &get_block_shard_id(block_hash, 
*shard_id), - receipt, - )?; + let _span = + tracing::trace_span!(target: "store", "write_incoming_and_outgoing_receipts") + .entered(); + + for ((block_hash, shard_id), receipt) in + self.chain_store_cache_update.outgoing_receipts.iter() + { + store_update.set_ser( + DBCol::OutgoingReceipts, + &get_block_shard_id(block_hash, *shard_id), + receipt, + )?; + } + for ((block_hash, shard_id), receipt) in + self.chain_store_cache_update.incoming_receipts.iter() + { + store_update.set_ser( + DBCol::IncomingReceipts, + &get_block_shard_id(block_hash, *shard_id), + receipt, + )?; + } } - for ((outcome_id, block_hash), outcome_with_proof) in - self.chain_store_cache_update.outcomes.iter() + { - store_update.insert_ser( - DBCol::TransactionResultForBlock, - &get_outcome_id_block_hash(outcome_id, block_hash), - &outcome_with_proof, - )?; - } - for ((block_hash, shard_id), ids) in self.chain_store_cache_update.outcome_ids.iter() { - store_update.set_ser( - DBCol::OutcomeIds, - &get_block_shard_id(block_hash, *shard_id), - &ids, - )?; + let _span = tracing::trace_span!(target: "store", "write_outcomes").entered(); + + for ((outcome_id, block_hash), outcome_with_proof) in + self.chain_store_cache_update.outcomes.iter() + { + store_update.insert_ser( + DBCol::TransactionResultForBlock, + &get_outcome_id_block_hash(outcome_id, block_hash), + &outcome_with_proof, + )?; + } + for ((block_hash, shard_id), ids) in self.chain_store_cache_update.outcome_ids.iter() { + store_update.set_ser( + DBCol::OutcomeIds, + &get_block_shard_id(block_hash, *shard_id), + &ids, + )?; + } } + for (receipt_id, shard_id) in self.chain_store_cache_update.receipt_id_to_shard_id.iter() { let data = borsh::to_vec(&shard_id)?; store_update.increment_refcount(DBCol::ReceiptIdToShardId, receipt_id.as_ref(), &data); @@ -2529,94 +2566,108 @@ impl<'a> ChainStoreUpdate<'a> { // Convert trie changes to database ops for trie nodes. // Create separate store update for deletions, because we want to update cache and don't want to remove nodes // from the store. - let mut deletions_store_update = self.store().store_update(); - for mut wrapped_trie_changes in self.trie_changes.drain(..) { - wrapped_trie_changes.apply_mem_changes(); - wrapped_trie_changes.insertions_into(&mut store_update); - wrapped_trie_changes.deletions_into(&mut deletions_store_update); - wrapped_trie_changes.state_changes_into(&mut store_update); - - if self.chain_store.save_trie_changes { - wrapped_trie_changes - .trie_changes_into(&mut store_update) - .map_err(|err| Error::Other(err.to_string()))?; + { + let _span = tracing::trace_span!(target: "store", "write_trie_changes").entered(); + let mut deletions_store_update = self.store().store_update(); + for mut wrapped_trie_changes in self.trie_changes.drain(..) 
{ + wrapped_trie_changes.apply_mem_changes(); + wrapped_trie_changes.insertions_into(&mut store_update); + wrapped_trie_changes.deletions_into(&mut deletions_store_update); + wrapped_trie_changes.state_changes_into(&mut store_update); + + if self.chain_store.save_trie_changes { + wrapped_trie_changes + .trie_changes_into(&mut store_update) + .map_err(|err| Error::Other(err.to_string()))?; + } } - } - for ((block_hash, shard_id), state_transition_data) in self.state_transition_data.drain() { - store_update.set_ser( - DBCol::StateTransitionData, - &get_block_shard_id(&block_hash, shard_id), - &state_transition_data, - )?; + for ((block_hash, shard_id), state_transition_data) in + self.state_transition_data.drain() + { + store_update.set_ser( + DBCol::StateTransitionData, + &get_block_shard_id(&block_hash, shard_id), + &state_transition_data, + )?; + } + for ((block_hash, shard_id), state_changes) in + self.add_state_changes_for_resharding.drain() + { + store_update.set_ser( + DBCol::StateChangesForSplitStates, + &get_block_shard_id(&block_hash, shard_id), + &state_changes, + )?; + } + + if self.remove_all_state_changes_for_resharding { + store_update.delete_all(DBCol::StateChangesForSplitStates); + } } - for ((block_hash, shard_id), state_changes) in self.add_state_changes_for_resharding.drain() { - store_update.set_ser( - DBCol::StateChangesForSplitStates, - &get_block_shard_id(&block_hash, shard_id), - &state_changes, - )?; - } + let _span = tracing::trace_span!(target: "store", "write_catchup").entered(); + + let mut affected_catchup_blocks = HashSet::new(); + for (prev_hash, hash) in self.remove_blocks_to_catchup.drain(..) { + assert!(!affected_catchup_blocks.contains(&prev_hash)); + if affected_catchup_blocks.contains(&prev_hash) { + return Err(Error::Other( + "Multiple changes to the store affect the same catchup block".to_string(), + )); + } + affected_catchup_blocks.insert(prev_hash); - if self.remove_all_state_changes_for_resharding { - store_update.delete_all(DBCol::StateChangesForSplitStates); - } + let mut prev_table = + self.chain_store.get_blocks_to_catchup(&prev_hash).unwrap_or_else(|_| vec![]); - let mut affected_catchup_blocks = HashSet::new(); - for (prev_hash, hash) in self.remove_blocks_to_catchup.drain(..) { - assert!(!affected_catchup_blocks.contains(&prev_hash)); - if affected_catchup_blocks.contains(&prev_hash) { - return Err(Error::Other( - "Multiple changes to the store affect the same catchup block".to_string(), - )); - } - affected_catchup_blocks.insert(prev_hash); + let mut remove_idx = prev_table.len(); + for (i, val) in prev_table.iter().enumerate() { + if *val == hash { + remove_idx = i; + } + } - let mut prev_table = - self.chain_store.get_blocks_to_catchup(&prev_hash).unwrap_or_else(|_| vec![]); + assert_ne!(remove_idx, prev_table.len()); + prev_table.swap_remove(remove_idx); - let mut remove_idx = prev_table.len(); - for (i, val) in prev_table.iter().enumerate() { - if *val == hash { - remove_idx = i; + if !prev_table.is_empty() { + store_update.set_ser( + DBCol::BlocksToCatchup, + prev_hash.as_ref(), + &prev_table, + )?; + } else { + store_update.delete(DBCol::BlocksToCatchup, prev_hash.as_ref()); } } + for prev_hash in self.remove_prev_blocks_to_catchup.drain(..) 
{ + assert!(!affected_catchup_blocks.contains(&prev_hash)); + if affected_catchup_blocks.contains(&prev_hash) { + return Err(Error::Other( + "Multiple changes to the store affect the same catchup block".to_string(), + )); + } + affected_catchup_blocks.insert(prev_hash); - assert_ne!(remove_idx, prev_table.len()); - prev_table.swap_remove(remove_idx); - - if !prev_table.is_empty() { - store_update.set_ser(DBCol::BlocksToCatchup, prev_hash.as_ref(), &prev_table)?; - } else { store_update.delete(DBCol::BlocksToCatchup, prev_hash.as_ref()); } - } - for prev_hash in self.remove_prev_blocks_to_catchup.drain(..) { - assert!(!affected_catchup_blocks.contains(&prev_hash)); - if affected_catchup_blocks.contains(&prev_hash) { - return Err(Error::Other( - "Multiple changes to the store affect the same catchup block".to_string(), - )); - } - affected_catchup_blocks.insert(prev_hash); + for (prev_hash, new_hash) in self.add_blocks_to_catchup.drain(..) { + assert!(!affected_catchup_blocks.contains(&prev_hash)); + if affected_catchup_blocks.contains(&prev_hash) { + return Err(Error::Other( + "Multiple changes to the store affect the same catchup block".to_string(), + )); + } + affected_catchup_blocks.insert(prev_hash); - store_update.delete(DBCol::BlocksToCatchup, prev_hash.as_ref()); - } - for (prev_hash, new_hash) in self.add_blocks_to_catchup.drain(..) { - assert!(!affected_catchup_blocks.contains(&prev_hash)); - if affected_catchup_blocks.contains(&prev_hash) { - return Err(Error::Other( - "Multiple changes to the store affect the same catchup block".to_string(), - )); + let mut prev_table = + self.chain_store.get_blocks_to_catchup(&prev_hash).unwrap_or_else(|_| vec![]); + prev_table.push(new_hash); + store_update.set_ser(DBCol::BlocksToCatchup, prev_hash.as_ref(), &prev_table)?; } - affected_catchup_blocks.insert(prev_hash); - - let mut prev_table = - self.chain_store.get_blocks_to_catchup(&prev_hash).unwrap_or_else(|_| vec![]); - prev_table.push(new_hash); - store_update.set_ser(DBCol::BlocksToCatchup, prev_hash.as_ref(), &prev_table)?; } + for state_sync_info in self.add_state_sync_infos.drain(..) 
{ store_update.set_ser( DBCol::StateDlInfos, @@ -2646,6 +2697,7 @@ impl<'a> ChainStoreUpdate<'a> { Ok(store_update) } + #[tracing::instrument(level = "debug", target = "store", "ChainStoreUpdate::commit", skip_all)] pub fn commit(mut self) -> Result<(), Error> { let store_update = self.finalize()?; store_update.commit()?; diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index 5f28c9eacca..561bce24571 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -24,12 +24,11 @@ use near_primitives::epoch_manager::ValidatorSelectionConfig; use near_primitives::errors::{EpochError, InvalidTxError}; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum}; -use near_primitives::shard_layout; use near_primitives::shard_layout::{ShardLayout, ShardUId}; use near_primitives::sharding::{ChunkHash, ShardChunkHeader}; use near_primitives::state_part::PartId; use near_primitives::stateless_validation::{ - ChunkEndorsement, ChunkStateWitness, ChunkValidatorAssignments, + ChunkEndorsement, ChunkValidatorAssignments, SignedEncodedChunkStateWitness, }; use near_primitives::transaction::{ Action, ExecutionMetadata, ExecutionOutcome, ExecutionOutcomeWithId, ExecutionStatus, @@ -45,10 +44,11 @@ use near_primitives::views::{ AccessKeyInfoView, AccessKeyList, CallResult, ContractCodeView, EpochValidatorInfo, QueryRequest, QueryResponse, QueryResponseKind, ViewStateResult, }; +use near_primitives::{checked_feature, shard_layout}; use near_store::test_utils::TestTriesBuilder; use near_store::{ - set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, StorageError, Store, StoreUpdate, - Trie, TrieChanges, WrappedTrieChanges, + set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, Store, StoreUpdate, Trie, + TrieChanges, WrappedTrieChanges, }; use num_rational::Ratio; use std::cmp::Ordering; @@ -950,14 +950,8 @@ impl EpochManagerAdapter for MockEpochManager { fn verify_chunk_state_witness_signature( &self, - _state_witness: &ChunkStateWitness, - ) -> Result { - Ok(true) - } - - fn verify_chunk_state_witness_signature_in_epoch( - &self, - _state_witness: &ChunkStateWitness, + _signed_witness: &SignedEncodedChunkStateWitness, + _chunk_producer: &AccountId, _epoch_id: &EpochId, ) -> Result { Ok(true) @@ -1083,7 +1077,7 @@ impl RuntimeAdapter for KeyValueRuntime { fn prepare_transactions( &self, - storage: RuntimeStorageConfig, + _storage: RuntimeStorageConfig, _chunk: PrepareTransactionsChunkContext, _prev_block: PrepareTransactionsBlockContext, transaction_groups: &mut dyn TransactionGroupIterator, @@ -1094,11 +1088,14 @@ impl RuntimeAdapter for KeyValueRuntime { while let Some(iter) = transaction_groups.next() { res.push(iter.next().unwrap()); } - Ok(PreparedTransactions { - transactions: res, - limited_by: None, - storage_proof: if storage.record_storage { Some(Default::default()) } else { None }, - }) + let storage_proof = if checked_feature!("stable", StatelessValidationV0, PROTOCOL_VERSION) + || cfg!(feature = "shadow_chunk_validation") + { + Some(Default::default()) + } else { + None + }; + Ok(PreparedTransactions { transactions: res, limited_by: None, storage_proof }) } fn apply_chunk( @@ -1242,7 +1239,13 @@ impl RuntimeAdapter for KeyValueRuntime { let state_root = hash(&data); self.state.write().unwrap().insert(state_root, state); self.state_size.write().unwrap().insert(state_root, state_size); - + let storage_proof = if checked_feature!("stable", 
StatelessValidationV0, PROTOCOL_VERSION) + || cfg!(feature = "shadow_chunk_validation") + { + Some(Default::default()) + } else { + None + }; Ok(ApplyChunkResult { trie_changes: WrappedTrieChanges::new( self.get_tries(), @@ -1258,8 +1261,9 @@ impl RuntimeAdapter for KeyValueRuntime { validator_proposals: vec![], total_gas_burnt: 0, total_balance_burnt: 0, - proof: if storage_config.record_storage { Some(Default::default()) } else { None }, + proof: storage_proof, processed_delayed_receipts: vec![], + processed_yield_timeouts: vec![], applied_receipts_hash: hash(&borsh::to_vec(receipts).unwrap()), }) } @@ -1452,20 +1456,4 @@ impl RuntimeAdapter for KeyValueRuntime { ) -> Result, Error> { Ok(vec![]) } - - fn load_mem_tries_on_startup(&self, _tracked_shards: &[ShardUId]) -> Result<(), StorageError> { - Ok(()) - } - - fn load_mem_trie_on_catchup( - &self, - _shard_uid: &ShardUId, - _state_root: &StateRoot, - ) -> Result<(), StorageError> { - Ok(()) - } - - fn retain_mem_tries(&self, _shard_uids: &[ShardUId]) {} - - fn unload_mem_trie(&self, _shard_uid: &ShardUId) {} } diff --git a/chain/chain/src/tests/simple_chain.rs b/chain/chain/src/tests/simple_chain.rs index e2f1a5d2f82..6ccc0721e74 100644 --- a/chain/chain/src/tests/simple_chain.rs +++ b/chain/chain/src/tests/simple_chain.rs @@ -35,7 +35,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_snapshot!(hash, @"CyVdmcpdfz8VAqZFN4zbZLTRcbcnAUzRJwNgxbgeEUMU"); } else { - insta::assert_snapshot!(hash, @"EsUNazp4zR2XgiwZSuQnX9dsaFk1VDhdRwGYt1YHpu5b"); + insta::assert_snapshot!(hash, @"CxmRfDBCbukRuksZMKEwHe6o8zqc2eZFsXmbQvPygwYi"); } for i in 1..5 { @@ -53,7 +53,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_snapshot!(hash, @"72j1xRcBZpPtyo2rpPBPRspL6Q9LCju2Doa8KFhYPNJt"); } else { - insta::assert_snapshot!(hash, @"CJ5p62dVTMVgRADQrWPkFLrozDN8KxbKMGqjVkPXBD7W"); + insta::assert_snapshot!(hash, @"6tqFaqvsAjrtkuPfnuXz2pJ1VPt1kvF95hVJR7x9JhXG"); } } diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index b7bb80fbb5e..b657e68cfce 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -13,7 +13,7 @@ use near_primitives::checked_feature; use near_primitives::errors::InvalidTxError; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{merklize, MerklePath}; -use near_primitives::receipt::Receipt; +use near_primitives::receipt::{PromiseYieldTimeout, Receipt}; use near_primitives::sandbox::state_patch::SandboxStatePatch; use near_primitives::shard_layout::{ShardLayout, ShardUId}; use near_primitives::sharding::ShardChunkHeader; @@ -31,7 +31,6 @@ use near_primitives::version::{ }; use near_primitives::views::{QueryRequest, QueryResponse}; use near_store::flat::FlatStorageManager; -use near_store::StorageError; use near_store::{PartialStorage, ShardTries, Store, Trie, WrappedTrieChanges}; use num_rational::Rational32; use std::collections::HashMap; @@ -108,6 +107,7 @@ pub struct ApplyChunkResult { pub total_balance_burnt: Balance, pub proof: Option, pub processed_delayed_receipts: Vec, + pub processed_yield_timeouts: Vec, /// Hash of Vec which were applied in a chunk, later used for /// chunk validation with state witness. 
/// Note that applied receipts are not necessarily executed as they can @@ -263,7 +263,6 @@ pub struct RuntimeStorageConfig { pub use_flat_storage: bool, pub source: StorageDataSource, pub state_patch: SandboxStatePatch, - pub record_storage: bool, } impl RuntimeStorageConfig { @@ -273,7 +272,6 @@ impl RuntimeStorageConfig { use_flat_storage, source: StorageDataSource::Db, state_patch: Default::default(), - record_storage: false, } } } @@ -510,26 +508,6 @@ pub trait RuntimeAdapter: Send + Sync { ) -> bool; fn get_protocol_config(&self, epoch_id: &EpochId) -> Result; - - /// Loads in-memory tries upon startup. The given shard_uids are possible candidates to load, - /// but which exact shards to load depends on configuration. This may only be called when flat - /// storage is ready. - fn load_mem_tries_on_startup(&self, tracked_shards: &[ShardUId]) -> Result<(), StorageError>; - - /// Loads in-memory trie upon catchup, if it is enabled. - /// Requires state root because `ChunkExtra` is not available at the time mem-trie is being loaded. - fn load_mem_trie_on_catchup( - &self, - shard_uid: &ShardUId, - state_root: &StateRoot, - ) -> Result<(), StorageError>; - - /// Retains in-memory tries for given shards, i.e. unload tries from memory for shards that are NOT - /// in the given list. Should be called to unload obsolete tries from memory. - fn retain_mem_tries(&self, shard_uids: &[ShardUId]); - - /// Unload trie from memory for given shard. - fn unload_mem_trie(&self, shard_uid: &ShardUId); } /// The last known / checked height and time when we have processed it. diff --git a/chain/chain/src/update_shard.rs b/chain/chain/src/update_shard.rs index 7a116fc9126..33e5b1fe73b 100644 --- a/chain/chain/src/update_shard.rs +++ b/chain/chain/src/update_shard.rs @@ -115,7 +115,6 @@ pub struct StorageContext { /// Data source used for processing shard update. pub storage_data_source: StorageDataSource, pub state_patch: SandboxStatePatch, - pub record_storage: bool, } /// Processes shard update with given block and shard. 
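With `record_storage` removed from both `RuntimeStorageConfig` and `StorageContext`, callers now only describe where state comes from; whether reads are recorded is decided inside the runtime from the next epoch's protocol version. A minimal sketch of a call site after this change (field values are illustrative, taken from the hunks above):

```rust
// Sketch only: mirrors RuntimeStorageConfig as it appears in this diff.
// There is no record_storage field any more; NightshadeRuntime wraps the
// trie with recording_reads() on its own when stateless validation is
// enabled in the next epoch (or under the shadow_chunk_validation feature).
let storage_config = RuntimeStorageConfig {
    state_root,
    use_flat_storage: true,
    source: StorageDataSource::Db,
    state_patch: Default::default(),
};
```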
@@ -185,7 +184,6 @@ pub fn apply_new_chunk( use_flat_storage: true, source: storage_context.storage_data_source, state_patch: storage_context.state_patch, - record_storage: storage_context.record_storage, }; match runtime.apply_chunk( storage_config, @@ -247,7 +245,6 @@ pub fn apply_old_chunk( use_flat_storage: true, source: storage_context.storage_data_source, state_patch: storage_context.state_patch, - record_storage: storage_context.record_storage, }; match runtime.apply_chunk( storage_config, @@ -332,6 +329,7 @@ fn apply_resharding_state_changes( let state_changes = StateChangesForResharding::from_raw_state_changes( apply_result.trie_changes.state_changes(), apply_result.processed_delayed_receipts.clone(), + apply_result.processed_yield_timeouts.clone(), ); let next_epoch_id = epoch_manager.get_next_epoch_id_from_prev_block(&block.prev_block_hash)?; let next_shard_layout = epoch_manager.get_shard_layout(&next_epoch_id)?; diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs index 7ba3a1d8b2c..6232b8b9bb8 100644 --- a/chain/chunks/src/lib.rs +++ b/chain/chunks/src/lib.rs @@ -111,10 +111,11 @@ use near_primitives::errors::EpochError; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{verify_path, MerklePath}; use near_primitives::receipt::Receipt; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::sharding::{ ChunkHash, EncodedShardChunk, EncodedShardChunkBody, PartialEncodedChunk, - PartialEncodedChunkPart, PartialEncodedChunkV2, ReceiptProof, ReedSolomonWrapper, ShardChunk, - ShardChunkHeader, ShardProof, + PartialEncodedChunkPart, PartialEncodedChunkV2, ReceiptProof, ShardChunk, ShardChunkHeader, + ShardProof, TransactionReceipt, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::validator_stake::ValidatorStake; @@ -397,7 +398,7 @@ impl ShardsManager { let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(ancestor_hash)?; - for part_ord in 0..self.rs.total_shard_count() { + for part_ord in 0..self.epoch_manager.num_total_parts() { let part_ord = part_ord as u64; if cache_entry.is_some_and(|cache_entry| cache_entry.parts.contains_key(&part_ord)) { continue; @@ -988,34 +989,18 @@ impl ShardsManager { // Construct EncodedShardChunk. If we earlier determined that we will // need parity parts, instruct the constructor to calculate them as // well. Otherwise we won’t bother. 
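+ // Encoding now goes through the shared ReedSolomonWrapper::encode, which
+ // computes the parity parts up front, so the fallible encoding step and
+ // the separate reconstruct() pass below are both gone.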
- let (parts, encoded_length) = match EncodedShardChunk::encode_transaction_receipts( - &mut self.rs, - chunk.transactions().to_vec(), - &outgoing_receipts, - ) { - Ok(result) => result, - Err(err) => { - warn!(target: "chunks", - "Not sending {:?}, failed to encode transactions and receipts: {}", - chunk.chunk_hash(), err); - return; - } - }; - if header.encoded_length() != encoded_length { + let (parts, encoded_length) = self + .rs + .encode(TransactionReceipt(chunk.transactions().to_vec(), outgoing_receipts.to_vec())); + + if header.encoded_length() != encoded_length as u64 { warn!(target: "chunks", "Not sending {:?}, expected encoded length doesn’t match calculated: {} != {}", chunk.chunk_hash(), header.encoded_length(), encoded_length); return; } - let mut content = EncodedShardChunkBody { parts }; - if let Err(err) = content.reconstruct(&mut self.rs) { - warn!(target: "chunks", - "Not sending {:?}, failed to reconstruct RS parity parts: {}", - chunk.chunk_hash(), err); - return; - } - + let content = EncodedShardChunkBody { parts }; let (encoded_merkle_root, merkle_paths) = content.get_merkle_hash_and_paths(); if header.encoded_merkle_root() != encoded_merkle_root { warn!(target: "chunks", @@ -1049,11 +1034,7 @@ impl ShardsManager { } } - // pub for testing - pub fn check_chunk_complete( - chunk: &mut EncodedShardChunk, - rs: &mut ReedSolomonWrapper, - ) -> ChunkStatus { + fn check_chunk_complete(&mut self, chunk: &mut EncodedShardChunk) -> ChunkStatus { let _span = debug_span!( target: "chunks", "check_chunk_complete", @@ -1061,29 +1042,30 @@ impl ShardsManager { shard_id = chunk.cloned_header().shard_id(), chunk_hash = ?chunk.chunk_hash()) .entered(); - let data_parts = rs.data_shard_count(); - if chunk.content().num_fetched_parts() >= data_parts { - if let Ok(_) = chunk.content_mut().reconstruct(rs) { - let (merkle_root, merkle_paths) = chunk.content().get_merkle_hash_and_paths(); - if merkle_root == chunk.encoded_merkle_root() { - debug!(target: "chunks", "Complete"); - ChunkStatus::Complete(merkle_paths) - } else { - debug!( - target: "chunks", - ?merkle_root, - chunk_encoded_merkle_root = ?chunk.encoded_merkle_root(), - "Invalid: Wrong merkle root"); - ChunkStatus::Invalid - } - } else { - debug!(target: "chunks", "Invalid: Failed to reconstruct"); - ChunkStatus::Invalid - } - } else { + + let data_parts = self.epoch_manager.num_data_parts(); + if chunk.content().num_fetched_parts() < data_parts { debug!(target: "chunks", num_fetched_parts = chunk.content().num_fetched_parts(), data_parts, "Incomplete"); - ChunkStatus::Incomplete + return ChunkStatus::Incomplete; } + + let encoded_length = chunk.encoded_length(); + if let Err(err) = self.rs.decode::( + chunk.content_mut().parts.as_mut_slice(), + encoded_length as usize, + ) { + debug!(target: "chunks", ?err, "Invalid: Failed to decode"); + return ChunkStatus::Invalid; + } + + let (merkle_root, merkle_paths) = chunk.content().get_merkle_hash_and_paths(); + if merkle_root != chunk.encoded_merkle_root() { + debug!(target: "chunks", ?merkle_root, chunk_encoded_merkle_root = ?chunk.encoded_merkle_root(), "Invalid: Wrong merkle root"); + return ChunkStatus::Invalid; + } + + debug!(target: "chunks", "Complete"); + ChunkStatus::Complete(merkle_paths) } /// Add a part to current encoded chunk stored in memory. It's present only if One Part was present and signed correctly. 
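The decode side of the same wrapper is exercised in `check_chunk_complete` above. A minimal round-trip sketch of the assumed API shape (`rs`, `transactions`, and `outgoing_receipts` are placeholders, and the exact signatures live in `near_primitives::reed_solomon`):

```rust
// Hypothetical round trip: encode returns the parts together with the
// encoded payload length, and decode needs that length back.
let payload = TransactionReceipt(transactions, outgoing_receipts);
let (mut parts, encoded_length) = rs.encode(payload);

// Simulate transport: as long as at least the data-part count of entries
// in `parts` survives, decode can reconstruct the original payload.
let recovered: TransactionReceipt = rs.decode(parts.as_mut_slice(), encoded_length)?;
```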
@@ -1108,7 +1090,7 @@ impl ShardsManager { &mut self, mut encoded_chunk: EncodedShardChunk, ) -> Result, Error> { - match ShardsManager::check_chunk_complete(&mut encoded_chunk, &mut self.rs) { + match self.check_chunk_complete(&mut encoded_chunk) { ChunkStatus::Complete(merkle_paths) => { self.requested_partial_encoded_chunks.remove(&encoded_chunk.chunk_hash()); match decode_encoded_chunk( @@ -1145,7 +1127,7 @@ impl ShardsManager { } // check part merkle proofs - let num_total_parts = self.rs.total_shard_count(); + let num_total_parts = self.epoch_manager.num_total_parts(); for part_info in forward.parts.iter() { self.validate_part(forward.merkle_root, part_info, num_total_parts)?; } @@ -1192,7 +1174,7 @@ impl ShardsManager { fn insert_forwarded_chunk(&mut self, forward: PartialEncodedChunkForwardMsg) { let chunk_hash = forward.chunk_hash.clone(); - let num_total_parts = self.rs.total_shard_count() as u64; + let num_total_parts = self.epoch_manager.num_total_parts() as u64; match self.chunk_forwards_cache.get_mut(&chunk_hash) { None => { // Never seen this chunk hash before, collect the parts and cache them @@ -1430,9 +1412,9 @@ impl ShardsManager { if entry.complete { return Ok(ProcessPartialEncodedChunkResult::Known); } - debug!(target: "chunks", num_parts_in_cache = entry.parts.len(), total_needed = self.rs.data_shard_count()); + debug!(target: "chunks", num_parts_in_cache = entry.parts.len(), total_needed = self.epoch_manager.num_data_parts()); } else { - debug!(target: "chunks", num_parts_in_cache = 0, total_needed = self.rs.data_shard_count()); + debug!(target: "chunks", num_parts_in_cache = 0, total_needed = self.epoch_manager.num_data_parts()); } // 1.b Checking chunk height @@ -1473,7 +1455,7 @@ impl ShardsManager { let partial_encoded_chunk = partial_encoded_chunk.as_ref().into_inner(); // 1.d Checking part_ords' validity - let num_total_parts = self.rs.total_shard_count(); + let num_total_parts = self.epoch_manager.num_total_parts(); for part_info in partial_encoded_chunk.parts.iter() { // TODO: only validate parts we care about // https://github.com/near/nearcore/issues/5885 @@ -1683,7 +1665,7 @@ impl ShardsManager { let protocol_version = self.epoch_manager.get_epoch_protocol_version(&epoch_id)?; let mut encoded_chunk = EncodedShardChunk::from_header( header.clone(), - self.rs.total_shard_count(), + self.epoch_manager.num_total_parts(), protocol_version, ); @@ -1904,7 +1886,7 @@ impl ShardsManager { prev_block_hash: &CryptoHash, chunk_entry: &EncodedChunksCacheEntry, ) -> Result { - for part_ord in 0..self.rs.total_shard_count() { + for part_ord in 0..self.epoch_manager.num_total_parts() { let part_ord = part_ord as u64; if !chunk_entry.parts.contains_key(&part_ord) { if need_part( @@ -1982,7 +1964,7 @@ impl ShardsManager { let mut block_producer_mapping = HashMap::new(); let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(&prev_block_hash)?; - for part_ord in 0..self.rs.total_shard_count() { + for part_ord in 0..self.epoch_manager.num_total_parts() { let part_ord = part_ord as u64; let to_whom = self.epoch_manager.get_part_owner(&epoch_id, part_ord).unwrap(); diff --git a/chain/chunks/src/test/basic.rs b/chain/chunks/src/test/basic.rs index bcebb48534e..8bf8d23e164 100644 --- a/chain/chunks/src/test/basic.rs +++ b/chain/chunks/src/test/basic.rs @@ -1,7 +1,16 @@ -use std::collections::HashSet; - +use crate::{ + adapter::ShardsManagerRequestFromClient, + client::ShardsManagerResponse, + test_loop::{ + forward_client_request_to_shards_manager, 
forward_network_request_to_shards_manager, + MockChainForShardsManager, MockChainForShardsManagerConfig, + }, + test_utils::default_tip, + ShardsManager, CHUNK_REQUEST_RETRY, +}; use derive_enum_from_into::{EnumFrom, EnumTryInto}; use near_async::messaging::noop; +use near_async::test_loop::futures::TestLoopDelayedActionEvent; use near_async::time; use near_async::{ messaging::{CanSend, IntoSender}, @@ -18,20 +27,9 @@ use near_network::{ }; use near_primitives::types::{AccountId, BlockHeight}; use near_store::test_utils::create_test_store; +use std::collections::HashSet; use tracing::log::info; -use crate::{ - adapter::ShardsManagerRequestFromClient, - client::ShardsManagerResponse, - test_loop::{ - forward_client_request_to_shards_manager, forward_network_request_to_shards_manager, - periodically_resend_chunk_requests, MockChainForShardsManager, - MockChainForShardsManagerConfig, ShardsManagerResendChunkRequests, - }, - test_utils::default_tip, - ShardsManager, CHUNK_REQUEST_RETRY, -}; - #[derive(derive_more::AsMut)] struct TestData { shards_manager: ShardsManager, @@ -60,8 +58,8 @@ enum TestEvent { NetworkToShardsManager(ShardsManagerRequestFromNetwork), ShardsManagerToClient(ShardsManagerResponse), ShardsManagerToNetwork(PeerManagerMessageRequest), - ShardsManagerResendRequests(ShardsManagerResendChunkRequests), Adhoc(AdhocEvent), + ShardsManagerDelayedActions(TestLoopDelayedActionEvent), } type ShardsManagerTestLoopBuilder = near_async::test_loop::TestLoopBuilder; @@ -182,8 +180,13 @@ fn test_chunk_forward() { test.register_handler(capture_events::().widen()); test.register_handler(forward_client_request_to_shards_manager().widen()); test.register_handler(forward_network_request_to_shards_manager().widen()); - test.register_handler(periodically_resend_chunk_requests(CHUNK_REQUEST_RETRY).widen()); test.register_handler(handle_adhoc_events::().widen()); + test.register_delayed_action_handler::(); + + test.data.shards_manager.periodically_resend_chunk_requests( + &mut test.sender().into_delayed_action_runner::(test.shutting_down()), + CHUNK_REQUEST_RETRY, + ); // We'll produce a single chunk whose next chunk producer is a chunk-only // producer, so that we can test that the chunk is forwarded to the next @@ -260,4 +263,5 @@ fn test_chunk_forward() { } } assert!(seen_part_request); + test.shutdown_and_drain_remaining_events(time::Duration::seconds(1)); } diff --git a/chain/chunks/src/test/multi.rs b/chain/chunks/src/test/multi.rs index 24e8bf84dd3..47a74bfa2d5 100644 --- a/chain/chunks/src/test/multi.rs +++ b/chain/chunks/src/test/multi.rs @@ -1,4 +1,16 @@ +use crate::{ + adapter::ShardsManagerRequestFromClient, + client::ShardsManagerResponse, + test_loop::{ + forward_client_request_to_shards_manager, forward_network_request_to_shards_manager, + route_shards_manager_network_messages, MockChainForShardsManager, + MockChainForShardsManagerConfig, + }, + test_utils::default_tip, + ShardsManager, CHUNK_REQUEST_RETRY, +}; use derive_enum_from_into::{EnumFrom, EnumTryInto}; +use near_async::test_loop::futures::TestLoopDelayedActionEvent; use near_async::{ messaging::IntoSender, test_loop::{ @@ -20,19 +32,6 @@ use near_primitives::{ }; use near_store::test_utils::create_test_store; -use crate::{ - adapter::ShardsManagerRequestFromClient, - client::ShardsManagerResponse, - test_loop::{ - forward_client_request_to_shards_manager, forward_network_request_to_shards_manager, - periodically_resend_chunk_requests, route_shards_manager_network_messages, - MockChainForShardsManager, 
MockChainForShardsManagerConfig, - ShardsManagerResendChunkRequests, - }, - test_utils::default_tip, - ShardsManager, CHUNK_REQUEST_RETRY, -}; - #[derive(derive_more::AsMut, derive_more::AsRef)] struct TestData { shards_manager: ShardsManager, @@ -50,11 +49,11 @@ impl AsMut for TestData { #[derive(EnumTryInto, Debug, EnumFrom)] enum TestEvent { Adhoc(AdhocEvent), + ShardsManagerDelayedActions(TestLoopDelayedActionEvent), ClientToShardsManager(ShardsManagerRequestFromClient), NetworkToShardsManager(ShardsManagerRequestFromNetwork), ShardsManagerToClient(ShardsManagerResponse), OutboundNetwork(PeerManagerMessageRequest), - ShardsManagerResendChunkRequests(ShardsManagerResendChunkRequests), } type ShardsManagerTestLoop = near_async::test_loop::TestLoop, (usize, TestEvent)>; @@ -106,13 +105,24 @@ fn basic_setup(config: BasicSetupConfig) -> ShardsManagerTestLoop { let mut test = builder.build(data); for idx in 0..test.data.len() { test.register_handler(handle_adhoc_events::().widen().for_index(idx)); + test.register_delayed_action_handler_for_index::(idx); test.register_handler(forward_client_request_to_shards_manager().widen().for_index(idx)); test.register_handler(forward_network_request_to_shards_manager().widen().for_index(idx)); test.register_handler(capture_events::().widen().for_index(idx)); - test.register_handler(route_shards_manager_network_messages(NETWORK_DELAY)); - test.register_handler( - periodically_resend_chunk_requests(CHUNK_REQUEST_RETRY).widen().for_index(idx), - ); + test.register_handler(route_shards_manager_network_messages( + test.sender(), + test.clock(), + NETWORK_DELAY, + )); + + let sender = test.sender().for_index(idx); + let shutting_down = test.shutting_down(); + test.sender().for_index(idx).send_adhoc_event("start_shards_manager", |data| { + data.shards_manager.periodically_resend_chunk_requests( + &mut sender.into_delayed_action_runner(shutting_down), + CHUNK_REQUEST_RETRY, + ); + }) } test } @@ -175,6 +185,8 @@ fn test_distribute_chunk_basic() { _ => panic!("Unexpected event"), } } + + test.shutdown_and_drain_remaining_events(time::Duration::seconds(1)); } /// Tests that when we have some block producers (validators) in the network, @@ -237,6 +249,7 @@ fn test_distribute_chunk_track_all_shards() { _ => panic!("Unexpected event"), } } + test.shutdown_and_drain_remaining_events(time::Duration::seconds(1)); } /// Tests that when the network has some block producers and also some chunk- @@ -348,4 +361,5 @@ fn test_distribute_chunk_with_chunk_only_producers() { }); } test.run_instant(); + test.shutdown_and_drain_remaining_events(time::Duration::seconds(1)); } diff --git a/chain/chunks/src/test_loop.rs b/chain/chunks/src/test_loop.rs index 1826d30983b..152d94938e0 100644 --- a/chain/chunks/src/test_loop.rs +++ b/chain/chunks/src/test_loop.rs @@ -1,9 +1,15 @@ -use std::{collections::HashMap, sync::Arc}; - +use crate::{ + adapter::ShardsManagerRequestFromClient, + logic::{cares_about_shard_this_or_next_epoch, make_outgoing_receipts_proofs}, + test_utils::{default_tip, tip}, + ShardsManager, +}; +use near_async::test_loop::delay_sender::DelaySender; use near_async::time; +use near_async::time::Clock; use near_async::{ messaging::Sender, - test_loop::event_handler::{interval, LoopEventHandler, LoopHandlerContext, TryIntoOrSelf}, + test_loop::event_handler::{LoopEventHandler, TryIntoOrSelf}, }; use near_chain::{types::Tip, Chain}; use near_epoch_manager::{ @@ -19,22 +25,17 @@ use near_network::{ use near_primitives::{ hash::CryptoHash, merkle::{self, MerklePath}, + 
reed_solomon::ReedSolomonWrapper, sharding::{ EncodedShardChunk, PartialEncodedChunk, PartialEncodedChunkV2, ReceiptProof, - ReedSolomonWrapper, ShardChunkHeader, + ShardChunkHeader, }, test_utils::create_test_signer, types::{AccountId, BlockHeight, BlockHeightDelta, MerkleHash, NumShards, ShardId}, version::PROTOCOL_VERSION, }; use near_store::Store; - -use crate::{ - adapter::ShardsManagerRequestFromClient, - logic::{cares_about_shard_this_or_next_epoch, make_outgoing_receipts_proofs}, - test_utils::{default_tip, tip}, - ShardsManager, -}; +use std::{collections::HashMap, sync::Arc}; pub fn forward_client_request_to_shards_manager( ) -> LoopEventHandler { @@ -61,25 +62,24 @@ pub fn route_shards_manager_network_messages< + From + From, >( + sender: DelaySender<(usize, Event)>, + clock: Clock, network_delay: time::Duration, ) -> LoopEventHandler { let mut route_back_lookup: HashMap = HashMap::new(); let mut next_hash: u64 = 0; - LoopEventHandler::new( - move |event: (usize, Event), - data: &mut Data, - context: &LoopHandlerContext<(usize, Event)>| { - let (idx, event) = event; - let message = event.try_into_or_self().map_err(|e| (idx, e.into()))?; - match message { - PeerManagerMessageRequest::NetworkRequests(request) => { - match request { - NetworkRequests::PartialEncodedChunkRequest { target, request, .. } => { - let target_idx = data.index_for_account(&target.account_id.unwrap()); - let route_back = CryptoHash::hash_borsh(next_hash); - route_back_lookup.insert(route_back, idx); - next_hash += 1; - context.sender.send_with_delay( + LoopEventHandler::new(move |event: (usize, Event), data: &mut Data| { + let (idx, event) = event; + let message = event.try_into_or_self().map_err(|e| (idx, e.into()))?; + match message { + PeerManagerMessageRequest::NetworkRequests(request) => { + match request { + NetworkRequests::PartialEncodedChunkRequest { target, request, .. 
} => { + let target_idx = data.index_for_account(&target.account_id.unwrap()); + let route_back = CryptoHash::hash_borsh(next_hash); + route_back_lookup.insert(route_back, idx); + next_hash += 1; + sender.send_with_delay( (target_idx, ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkRequest { partial_encoded_chunk_request: request, @@ -87,73 +87,66 @@ pub fn route_shards_manager_network_messages< }.into()), network_delay, ); - Ok(()) - } - NetworkRequests::PartialEncodedChunkResponse { route_back, response } => { - let target_idx = - *route_back_lookup.get(&route_back).expect("Route back not found"); - context.sender.send_with_delay( + Ok(()) + } + NetworkRequests::PartialEncodedChunkResponse { route_back, response } => { + let target_idx = + *route_back_lookup.get(&route_back).expect("Route back not found"); + sender.send_with_delay( (target_idx, ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkResponse { partial_encoded_chunk_response: response, - received_time: context.clock.now().into(), // TODO: use clock + received_time: clock.now().into(), // TODO: use clock }.into()), network_delay, ); - Ok(()) - } - NetworkRequests::PartialEncodedChunkMessage { - account_id, - partial_encoded_chunk, - } => { - let target_idx = data.index_for_account(&account_id); - context.sender.send_with_delay( - ( - target_idx, - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk( - partial_encoded_chunk.into(), - ) - .into(), - ), - network_delay, - ); - Ok(()) - } - NetworkRequests::PartialEncodedChunkForward { account_id, forward } => { - let target_idx = data.index_for_account(&account_id); - context.sender.send_with_delay( - (target_idx, - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward( - forward, - ).into()), + Ok(()) + } + NetworkRequests::PartialEncodedChunkMessage { + account_id, + partial_encoded_chunk, + } => { + let target_idx = data.index_for_account(&account_id); + sender.send_with_delay( + ( + target_idx, + ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk( + partial_encoded_chunk.into(), + ) + .into(), + ), + network_delay, + ); + Ok(()) + } + NetworkRequests::PartialEncodedChunkForward { account_id, forward } => { + let target_idx = data.index_for_account(&account_id); + sender.send_with_delay( + ( + target_idx, + ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward( + forward, + ) + .into(), + ), network_delay, ); - Ok(()) - } - other_message => Err(( - idx, - PeerManagerMessageRequest::NetworkRequests(other_message).into(), - )), + Ok(()) + } + other_message => { + Err((idx, PeerManagerMessageRequest::NetworkRequests(other_message).into())) } } - message => Err((idx, message.into())), } - }, - ) + message => Err((idx, message.into())), + } + }) } +// NOTE: this is no longer needed for TestLoop, but some other non-TestLoop tests depend on it. #[derive(Clone, Debug, PartialEq, Eq)] pub struct ShardsManagerResendChunkRequests; -/// Periodically call resend_chunk_requests. -pub fn periodically_resend_chunk_requests( - every: time::Duration, -) -> LoopEventHandler { - interval(every, ShardsManagerResendChunkRequests, |data: &mut ShardsManager| { - data.resend_chunk_requests() - }) -} - /// A simple implementation of the chain side that interacts with /// ShardsManager. 
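
The test changes above drop the standalone `periodically_resend_chunk_requests` interval handler: `ShardsManager` now re-arms its own resend timer through a delayed-action runner, and the tests drive it via `register_delayed_action_handler` plus a final `shutdown_and_drain_remaining_events`. Below is a minimal model of that self-rescheduling pattern; every name here is illustrative rather than near-async's actual API.

```rust
use std::time::Duration;

// An action runs against the component and may schedule further actions.
type Action = Box<dyn FnOnce(&mut ShardsManagerLike, &mut dyn DelayedActionRunner)>;

trait DelayedActionRunner {
    fn run_later(&mut self, delay: Duration, action: Action);
}

struct ShardsManagerLike {
    resend_count: u32,
}

impl ShardsManagerLike {
    fn resend_chunk_requests(&mut self) {
        self.resend_count += 1;
    }

    // Mirrors the shape of periodically_resend_chunk_requests: do the work,
    // then re-arm the timer, so the cadence lives in the component itself
    // instead of in an external interval event.
    fn periodically_resend_chunk_requests(
        &mut self,
        runner: &mut dyn DelayedActionRunner,
        every: Duration,
    ) {
        self.resend_chunk_requests();
        runner.run_later(
            every,
            Box::new(move |this, runner| this.periodically_resend_chunk_requests(runner, every)),
        );
    }
}

// A toy runner standing in for the test loop's virtual clock: it just queues
// actions, and the driver below fires them in order.
struct QueueRunner {
    queue: Vec<Action>,
}

impl DelayedActionRunner for QueueRunner {
    fn run_later(&mut self, _delay: Duration, action: Action) {
        // A real runner would order the queue by fire time.
        self.queue.push(action);
    }
}

fn main() {
    let mut mgr = ShardsManagerLike { resend_count: 0 };
    let mut runner = QueueRunner { queue: Vec::new() };
    // The first call runs immediately and schedules the next one.
    mgr.periodically_resend_chunk_requests(&mut runner, Duration::from_millis(400));
    // Drive three timer fires; each one re-arms itself.
    for _ in 0..3 {
        let action = runner.queue.remove(0);
        action(&mut mgr, &mut runner);
    }
    assert_eq!(mgr.resend_count, 4);
}
```
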
pub struct MockChainForShardsManager { diff --git a/chain/chunks/src/test_utils.rs b/chain/chunks/src/test_utils.rs index 1705d9a88ed..327118d57b9 100644 --- a/chain/chunks/src/test_utils.rs +++ b/chain/chunks/src/test_utils.rs @@ -9,9 +9,10 @@ use near_network::test_utils::MockPeerManagerAdapter; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{self, MerklePath}; use near_primitives::receipt::Receipt; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::sharding::{ EncodedShardChunk, PartialEncodedChunk, PartialEncodedChunkPart, PartialEncodedChunkV2, - ReedSolomonWrapper, ShardChunkHeader, + ShardChunkHeader, }; use near_primitives::test_utils::create_test_signer; use near_primitives::types::MerkleHash; diff --git a/chain/client-primitives/src/types.rs b/chain/client-primitives/src/types.rs index e4128d44345..5088e1dc5c1 100644 --- a/chain/client-primitives/src/types.rs +++ b/chain/client-primitives/src/types.rs @@ -92,9 +92,9 @@ impl Clone for DownloadStatus { pub enum ShardSyncStatus { StateDownloadHeader, StateDownloadParts, - StateDownloadScheduling, - StateDownloadApplying, - StateDownloadComplete, + StateApplyScheduling, + StateApplyComplete, + StateApplyFinalizing, ReshardingScheduling, ReshardingApplying, StateSyncDone, @@ -105,9 +105,9 @@ impl ShardSyncStatus { match self { ShardSyncStatus::StateDownloadHeader => 0, ShardSyncStatus::StateDownloadParts => 1, - ShardSyncStatus::StateDownloadScheduling => 2, - ShardSyncStatus::StateDownloadApplying => 3, - ShardSyncStatus::StateDownloadComplete => 4, + ShardSyncStatus::StateApplyScheduling => 2, + ShardSyncStatus::StateApplyComplete => 3, + ShardSyncStatus::StateApplyFinalizing => 4, ShardSyncStatus::ReshardingScheduling => 5, ShardSyncStatus::ReshardingApplying => 6, ShardSyncStatus::StateSyncDone => 7, @@ -129,9 +129,9 @@ impl ToString for ShardSyncStatus { match self { ShardSyncStatus::StateDownloadHeader => "header".to_string(), ShardSyncStatus::StateDownloadParts => "parts".to_string(), - ShardSyncStatus::StateDownloadScheduling => "scheduling".to_string(), - ShardSyncStatus::StateDownloadApplying => "applying".to_string(), - ShardSyncStatus::StateDownloadComplete => "download complete".to_string(), + ShardSyncStatus::StateApplyScheduling => "apply scheduling".to_string(), + ShardSyncStatus::StateApplyComplete => "apply complete".to_string(), + ShardSyncStatus::StateApplyFinalizing => "apply finalizing".to_string(), ShardSyncStatus::ReshardingScheduling => "resharding scheduling".to_string(), ShardSyncStatus::ReshardingApplying => "resharding applying".to_string(), ShardSyncStatus::StateSyncDone => "done".to_string(), diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index 2282623e4a9..27254262d35 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -21,9 +21,9 @@ use near_async::futures::{AsyncComputationSpawner, FutureSpawner}; use near_async::messaging::IntoSender; use near_async::messaging::{CanSend, Sender}; use near_async::time::{Clock, Duration, Instant}; -use near_chain::chain::VerifyBlockHashAndSignatureResult; use near_chain::chain::{ ApplyStatePartsRequest, BlockCatchUpRequest, BlockMissingChunks, BlocksCatchUpState, + LoadMemtrieRequest, VerifyBlockHashAndSignatureResult, }; use near_chain::flat_storage_creator::FlatStorageCreator; use near_chain::orphan::OrphanMissingChunks; @@ -70,10 +70,10 @@ use near_primitives::hash::CryptoHash; use near_primitives::merkle::{merklize, MerklePath, PartialMerkleTree}; use 
near_primitives::network::PeerId; use near_primitives::receipt::Receipt; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::sharding::StateSyncInfo; use near_primitives::sharding::{ - ChunkHash, EncodedShardChunk, PartialEncodedChunk, ReedSolomonWrapper, ShardChunk, - ShardChunkHeader, ShardInfo, + ChunkHash, EncodedShardChunk, PartialEncodedChunk, ShardChunk, ShardChunkHeader, ShardInfo, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::chunk_extra::ChunkExtra; @@ -1002,18 +1002,11 @@ impl Client { let prepared_transactions = if let Some(mut iter) = sharded_tx_pool.get_pool_iterator(shard_uid) { - let me = self - .validator_signer - .as_ref() - .map(|validator_signer| validator_signer.validator_id().clone()); - let record_storage = chain - .should_produce_state_witness_for_this_or_next_epoch(&me, &prev_block_header)?; let storage_config = RuntimeStorageConfig { state_root: *chunk_extra.state_root(), use_flat_storage: true, source: StorageDataSource::Db, state_patch: Default::default(), - record_storage, }; runtime.prepare_transactions( storage_config, @@ -2335,6 +2328,7 @@ impl Client { &mut self, highest_height_peers: &[HighestHeightPeerInfo], state_parts_task_scheduler: &Sender, + load_memtrie_scheduler: &Sender, block_catch_up_task_scheduler: &Sender, resharding_scheduler: &Sender, apply_chunks_done_callback: DoneApplyChunkCallback, @@ -2396,7 +2390,7 @@ impl Client { .iter() .map(|id| self.epoch_manager.shard_id_to_uid(*id, &epoch_id).unwrap()) .collect(); - self.runtime_adapter.retain_mem_tries(&shard_uids); + self.runtime_adapter.get_tries().retain_mem_tries(&shard_uids); for &shard_id in &tracking_shards { let shard_uid = ShardUId::from_shard_id_and_layout(shard_id, &shard_layout); @@ -2426,6 +2420,7 @@ impl Client { highest_height_peers, tracking_shards, state_parts_task_scheduler, + load_memtrie_scheduler, resharding_scheduler, state_parts_future_spawner, use_colour, diff --git a/chain/client/src/client_actions.rs b/chain/client/src/client_actions.rs index 465515f041d..944dd29188c 100644 --- a/chain/client/src/client_actions.rs +++ b/chain/client/src/client_actions.rs @@ -21,6 +21,7 @@ use near_async::time::{Duration, Instant}; use near_async::{MultiSend, MultiSendMessage, MultiSenderFrom}; use near_chain::chain::{ ApplyStatePartsRequest, ApplyStatePartsResponse, BlockCatchUpRequest, BlockCatchUpResponse, + LoadMemtrieRequest, LoadMemtrieResponse, }; use near_chain::resharding::{ReshardingRequest, ReshardingResponse}; use near_chain::test_utils::format_hash; @@ -89,6 +90,7 @@ pub struct ClientSenderForClient { #[multi_send_message_derive(Debug)] pub struct SyncJobsSenderForClient { pub apply_state_parts: Sender, + pub load_memtrie: Sender, pub block_catch_up: Sender, pub resharding: Sender, } @@ -619,6 +621,7 @@ impl ClientActionHandler for ClientActions { node_public_key, node_key, uptime_sec, + genesis_hash: *self.client.chain.genesis().hash(), detailed_debug_status, }) } @@ -1393,6 +1396,7 @@ impl ClientActions { if let Err(err) = self.client.run_catchup( &self.network_info.highest_height_peers, &self.sync_jobs_sender.apply_state_parts, + &self.sync_jobs_sender.load_memtrie, &self.sync_jobs_sender.block_catch_up, &self.sync_jobs_sender.resharding, self.get_apply_chunks_done_callback(), @@ -1619,6 +1623,7 @@ impl ClientActions { &self.network_info.highest_height_peers, shards_to_sync, &self.sync_jobs_sender.apply_state_parts, + &self.sync_jobs_sender.load_memtrie, &self.sync_jobs_sender.resharding, 
self.state_parts_future_spawner.as_ref(), use_colour, @@ -1781,6 +1786,26 @@ impl ClientActionHandler for ClientActions { } } +impl ClientActionHandler for ClientActions { + type Result = (); + + // The memtrie was loaded as a part of catchup or state-sync, + // (see https://github.com/near/nearcore/blob/master/docs/architecture/how/sync.md#basics). + // Here we save the result of loading memtrie to the appropriate place, + // depending on whether it was catch-up or state sync. + #[perf] + fn handle(&mut self, msg: LoadMemtrieResponse) -> Self::Result { + tracing::debug!(target: "client", ?msg); + if let Some((sync, _, _)) = self.client.catchup_state_syncs.get_mut(&msg.sync_hash) { + // We are doing catchup + sync.set_load_memtrie_result(msg.shard_uid, msg.load_result); + } else { + // We are doing state sync + self.client.state_sync.set_load_memtrie_result(msg.shard_uid, msg.load_result); + } + } +} + impl ClientActionHandler for ClientActions { type Result = (); diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs index 33c11132fb4..493d5c8f6b2 100644 --- a/chain/client/src/info.rs +++ b/chain/client/src/info.rs @@ -1,12 +1,15 @@ use crate::config_updater::ConfigUpdater; use crate::{metrics, SyncStatus}; use itertools::Itertools; +use lru::LruCache; use near_async::messaging::Sender; use near_async::time::{Clock, Instant}; use near_chain_configs::{ClientConfig, LogSummaryStyle, SyncConfig}; use near_client_primitives::types::StateSyncStatus; +use near_epoch_manager::EpochManagerAdapter; use near_network::types::NetworkInfo; use near_primitives::block::Tip; +use near_primitives::hash::CryptoHash; use near_primitives::network::PeerId; use near_primitives::telemetry::{ TelemetryAgentInfo, TelemetryChainInfo, TelemetryInfo, TelemetrySystemInfo, @@ -24,7 +27,7 @@ use near_primitives::views::{ }; use near_telemetry::TelemetryEvent; use std::cmp::min; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fmt::Write; use std::sync::Arc; use sysinfo::{get_current_pid, set_open_files_limit, Pid, ProcessExt, System, SystemExt}; @@ -68,6 +71,8 @@ pub struct InfoHelper { enable_multiline_logging: bool, // Keeps track of the previous SyncRequirement for updating metrics. prev_sync_requirement: Option, + /// Number of validators (block + chunk producers) per epoch, cached for a small number of epochs. + num_validators_per_epoch: LruCache, } impl InfoHelper { @@ -95,6 +100,7 @@ impl InfoHelper { epoch_id: None, enable_multiline_logging: client_config.enable_multiline_logging, prev_sync_requirement: None, + num_validators_per_epoch: LruCache::new(3), } } @@ -275,6 +281,36 @@ impl InfoHelper { } } + /// Returns the number of validators in a given epoch (EpochId). + /// + /// The set of validators include both block producers and chunk producers. + /// This set of validators do not change during the epoch, so it is cached for a small number of epochs. + /// It does NOT currently consider whether the validators are slashed or not. 
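
The `get_num_validators` helper that follows memoizes, per `EpochId`, the size of the union of block producers and chunk producers, since validator membership is fixed for the whole epoch. Here is a dependency-free sketch of the same scheme; the real code uses an `lru::LruCache` capped at a few epochs and queries an `EpochManagerAdapter`, so the stand-in types below are assumptions for illustration only.

```rust
use std::collections::{HashMap, HashSet};

// Simplified stand-ins for near-primitives types.
type EpochId = [u8; 32];
type AccountId = String;

struct ValidatorCountCache {
    // The real code bounds this with an LruCache holding ~3 epochs.
    per_epoch: HashMap<EpochId, usize>,
}

impl ValidatorCountCache {
    fn get_num_validators(
        &mut self,
        epoch_id: EpochId,
        block_producers: impl Fn() -> Vec<AccountId>,
        chunk_producers: impl Fn() -> Vec<AccountId>,
    ) -> usize {
        *self.per_epoch.entry(epoch_id).or_insert_with(|| {
            // Count each account once, even if it is both a block producer
            // and a chunk producer.
            let union: HashSet<AccountId> =
                block_producers().into_iter().chain(chunk_producers()).collect();
            union.len()
        })
    }
}

fn main() {
    let mut cache = ValidatorCountCache { per_epoch: HashMap::new() };
    let bp = || vec!["test1".to_string(), "test2".to_string(), "test3".to_string()];
    // Chunk producers are typically a superset of block producers.
    let cp = || (1..=5).map(|i| format!("test{i}")).collect::<Vec<_>>();
    assert_eq!(cache.get_num_validators([0; 32], bp, cp), 5);
    // A second call for the same epoch hits the cache.
    assert_eq!(cache.get_num_validators([0; 32], bp, cp), 5);
}
```
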
+    fn get_num_validators(
+        &mut self,
+        epoch_manager: &dyn EpochManagerAdapter,
+        epoch_id: &EpochId,
+        last_block_hash: &CryptoHash,
+    ) -> usize {
+        self.num_validators_per_epoch
+            .get_or_insert(epoch_id.clone(), || {
+                let block_producers: HashSet<AccountId> = epoch_manager
+                    .get_epoch_block_producers_ordered(epoch_id, last_block_hash)
+                    .unwrap_or(vec![])
+                    .into_iter()
+                    .map(|(validator_stake, _)| validator_stake.account_id().clone())
+                    .collect();
+                let chunk_producers: HashSet<AccountId> = epoch_manager
+                    .get_epoch_chunk_producers(epoch_id)
+                    .unwrap_or(vec![])
+                    .into_iter()
+                    .map(|validator_stake| validator_stake.account_id().clone())
+                    .collect();
+                block_producers.union(&chunk_producers).count()
+            })
+            .map_or(0, |num_validators| *num_validators)
+    }
+
     /// Print current summary.
     pub fn log_summary(
         &mut self,
@@ -286,10 +322,11 @@ impl InfoHelper {
         let is_syncing = client.sync_status.is_syncing();
         let head = unwrap_or_return!(client.chain.head());
         let validator_info = if !is_syncing {
-            let validators = unwrap_or_return!(client
-                .epoch_manager
-                .get_epoch_block_producers_ordered(&head.epoch_id, &head.last_block_hash));
-            let num_validators = validators.len();
+            let num_validators = self.get_num_validators(
+                client.epoch_manager.as_ref(),
+                &head.epoch_id,
+                &head.last_block_hash,
+            );
             let account_id = client.validator_signer.as_ref().map(|x| x.validator_id());
             let is_validator = if let Some(account_id) = account_id {
                 match client.epoch_manager.get_validator_by_account_id(
@@ -874,6 +911,7 @@ mod tests {
     use near_chain::{Chain, ChainGenesis, DoomslugThresholdMode};
     use near_chain_configs::Genesis;
     use near_epoch_manager::shard_tracker::ShardTracker;
+    use near_epoch_manager::test_utils::*;
     use near_epoch_manager::EpochManager;
     use near_network::test_utils::peer_id_from_seed;
     use near_store::genesis::initialize_genesis_state;
@@ -899,7 +937,7 @@ mod tests {
     }
 
     #[test]
-    fn telemetry_info() {
+    fn test_telemetry_info() {
         let config = ClientConfig::test(false, 1230, 2340, 50, false, true, true, true);
         let info_helper = InfoHelper::new(Clock::real(), noop().into_sender(), &config, None);
 
@@ -955,4 +993,63 @@ mod tests {
             Some(_)
         );
     }
+
+    /// Tests that `num_validators` returns the number of all validators including both block and chunk producers.
+    #[test]
+    fn test_num_validators() {
+        let amount_staked = 1_000_000;
+        let validators = vec![
+            ("test1".parse().unwrap(), amount_staked),
+            ("test2".parse().unwrap(), amount_staked),
+            ("test3".parse().unwrap(), amount_staked),
+            ("test4".parse().unwrap(), amount_staked),
+            ("test5".parse().unwrap(), amount_staked),
+        ];
+        let num_validators = validators.len();
+        let num_block_producer_seats = 3usize;
+        assert!(
+            num_block_producer_seats < num_validators,
+            "for this test, make sure the number of validators is greater than the number of block producer seats"
+        );
+
+        let last_block_hash = CryptoHash::default();
+        let epoch_id = EpochId::default();
+        let epoch_length = 2;
+        let num_shards = 2;
+
+        let epoch_manager_adapter = setup_epoch_manager(
+            validators,
+            epoch_length,
+            num_shards,
+            num_block_producer_seats.try_into().unwrap(),
+            0,
+            90,
+            90,
+            90,
+            default_reward_calculator(),
+        )
+        .into_handle();
+
+        // First check that we have different numbers of block and chunk producers.
+ assert_eq!( + num_block_producer_seats, + epoch_manager_adapter + .get_epoch_block_producers_ordered(&epoch_id, &last_block_hash) + .unwrap() + .len() + ); + assert_eq!( + num_validators, + epoch_manager_adapter.get_epoch_chunk_producers(&epoch_id).unwrap().len() + ); + + // Then check that get_num_validators returns the correct number of validators. + let client_config = ClientConfig::test(false, 1230, 2340, 50, false, true, true, true); + let mut info_helper = + InfoHelper::new(Clock::real(), noop().into_sender(), &client_config, None); + assert_eq!( + num_validators, + info_helper.get_num_validators(&epoch_manager_adapter, &epoch_id, &last_block_hash) + ); + } } diff --git a/chain/client/src/metrics.rs b/chain/client/src/metrics.rs index 395ec158e88..13ef59aecdf 100644 --- a/chain/client/src/metrics.rs +++ b/chain/client/src/metrics.rs @@ -576,9 +576,39 @@ pub(crate) static CHUNK_STATE_WITNESS_VALIDATION_TIME: Lazy = Lazy pub(crate) static CHUNK_STATE_WITNESS_TOTAL_SIZE: Lazy = Lazy::new(|| { try_create_histogram_vec( "near_chunk_state_witness_total_size", - "Stateless validation state witness size in bytes", + "Stateless validation compressed state witness size in bytes", &["shard_id"], - Some(exponential_buckets(1000.0, 2.0, 20).unwrap()), + Some(exponential_buckets(100_000.0, 1.2, 32).unwrap()), + ) + .unwrap() +}); + +pub(crate) static CHUNK_STATE_WITNESS_RAW_SIZE: Lazy = Lazy::new(|| { + try_create_histogram_vec( + "near_chunk_state_witness_raw_size", + "Stateless validation uncompressed (raw) state witness size in bytes", + &["shard_id"], + Some(exponential_buckets(100_000.0, 1.2, 32).unwrap()), + ) + .unwrap() +}); + +pub(crate) static CHUNK_STATE_WITNESS_ENCODE_TIME: Lazy = Lazy::new(|| { + try_create_histogram_vec( + "near_chunk_state_witness_encode_time", + "State witness encoding (serialization + compression) latency in seconds", + &["shard_id"], + Some(linear_buckets(0.025, 0.025, 20).unwrap()), + ) + .unwrap() +}); + +pub(crate) static CHUNK_STATE_WITNESS_DECODE_TIME: Lazy = Lazy::new(|| { + try_create_histogram_vec( + "near_chunk_state_witness_decode_time", + "State witness decoding (decompression + deserialization) latency in seconds", + &["shard_id"], + Some(linear_buckets(0.025, 0.025, 20).unwrap()), ) .unwrap() }); diff --git a/chain/client/src/stateless_validation/chunk_validator/mod.rs b/chain/client/src/stateless_validation/chunk_validator/mod.rs index 6b50f94b5b8..c27565e9923 100644 --- a/chain/client/src/stateless_validation/chunk_validator/mod.rs +++ b/chain/client/src/stateless_validation/chunk_validator/mod.rs @@ -27,7 +27,8 @@ use near_primitives::merkle::merklize; use near_primitives::receipt::Receipt; use near_primitives::sharding::{ChunkHash, ReceiptProof, ShardChunkHeader}; use near_primitives::stateless_validation::{ - ChunkEndorsement, ChunkStateWitness, ChunkStateWitnessAck, ChunkStateWitnessInner, + ChunkEndorsement, ChunkStateWitness, ChunkStateWitnessAck, ChunkStateWitnessSize, + SignedEncodedChunkStateWitness, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::chunk_extra::ChunkExtra; @@ -93,38 +94,25 @@ impl ChunkValidator { chain: &Chain, processing_done_tracker: Option, ) -> Result<(), Error> { - if !self.epoch_manager.verify_chunk_state_witness_signature(&state_witness)? 
{ - return Err(Error::InvalidChunkStateWitness("Invalid signature".to_string())); - } - - let state_witness_inner = state_witness.inner; - let chunk_header = state_witness_inner.chunk_header.clone(); - let Some(my_signer) = self.my_signer.as_ref() else { - return Err(Error::NotAValidator); - }; - let epoch_id = - self.epoch_manager.get_epoch_id_from_prev_block(chunk_header.prev_block_hash())?; - // We will only validate something if we are a chunk validator for this chunk. - // Note this also covers the case before the protocol upgrade for chunk validators, - // because the chunk validators will be empty. - let chunk_validator_assignments = self.epoch_manager.get_chunk_validator_assignments( - &epoch_id, - chunk_header.shard_id(), - chunk_header.height_created(), - )?; - if !chunk_validator_assignments.contains(my_signer.validator_id()) { - return Err(Error::NotAChunkValidator); + let prev_block_hash = state_witness.chunk_header.prev_block_hash(); + let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(prev_block_hash)?; + if epoch_id != state_witness.epoch_id { + return Err(Error::InvalidChunkStateWitness(format!( + "Invalid EpochId {:?} for previous block {}, expected {:?}", + state_witness.epoch_id, prev_block_hash, epoch_id + ))); } let pre_validation_result = pre_validate_chunk_state_witness( - &state_witness_inner, + &state_witness, chain, self.epoch_manager.as_ref(), self.runtime_adapter.as_ref(), )?; + let chunk_header = state_witness.chunk_header.clone(); let network_sender = self.network_sender.clone(); - let signer = my_signer.clone(); + let signer = self.my_signer.as_ref().ok_or(Error::NotAValidator)?.clone(); let epoch_manager = self.epoch_manager.clone(); let runtime_adapter = self.runtime_adapter.clone(); let chunk_endorsement_tracker = self.chunk_endorsement_tracker.clone(); @@ -133,7 +121,7 @@ impl ChunkValidator { let _processing_done_tracker_capture = processing_done_tracker; match validate_chunk_state_witness( - state_witness_inner, + state_witness, pre_validation_result, epoch_manager.as_ref(), runtime_adapter.as_ref(), @@ -182,7 +170,7 @@ pub(crate) fn validate_prepared_transactions( /// We do this before handing off the computationally intensive part to a /// validation thread. pub(crate) fn pre_validate_chunk_state_witness( - state_witness: &ChunkStateWitnessInner, + state_witness: &ChunkStateWitness, chain: &Chain, epoch_manager: &dyn EpochManagerAdapter, runtime_adapter: &dyn RuntimeAdapter, @@ -262,7 +250,6 @@ pub(crate) fn pre_validate_chunk_state_witness( nodes: state_witness.new_transactions_validation_state.clone(), }), state_patch: Default::default(), - record_storage: false, }; match validate_prepared_transactions( @@ -314,7 +301,6 @@ pub(crate) fn pre_validate_chunk_state_witness( nodes: state_witness.main_state_transition.base_state.clone(), }), state_patch: Default::default(), - record_storage: false, }, }) }; @@ -469,7 +455,7 @@ pub(crate) struct PreValidationOutput { } pub(crate) fn validate_chunk_state_witness( - state_witness: ChunkStateWitnessInner, + state_witness: ChunkStateWitness, pre_validation_output: PreValidationOutput, epoch_manager: &dyn EpochManagerAdapter, runtime_adapter: &dyn RuntimeAdapter, @@ -529,7 +515,6 @@ pub(crate) fn validate_chunk_state_witness( nodes: transition.base_state, }), state_patch: Default::default(), - record_storage: false, }, }; let OldChunkResult { apply_result, .. 
} = apply_old_chunk( @@ -637,54 +622,57 @@ impl Client { /// you can use the `processing_done_tracker` argument (but it's optional, it's safe to pass None there). pub fn process_chunk_state_witness( &mut self, - witness: ChunkStateWitness, + signed_witness: SignedEncodedChunkStateWitness, processing_done_tracker: Option, ) -> Result<(), Error> { + let (witness, raw_witness_size) = self.partially_validate_state_witness(&signed_witness)?; + // Send the acknowledgement for the state witness back to the chunk producer. // This is currently used for network roundtrip time measurement, so we do not need to // wait for validation to finish. - if let Err(err) = self.send_state_witness_ack(&witness) { - tracing::warn!(target: "stateless_validation", error = &err as &dyn std::error::Error, - "Error sending chunk state witness acknowledgement"); + self.send_state_witness_ack(&witness); + + // Avoid processing state witness for old chunks. + // In particular it is impossible for a chunk created at a height + // that doesn't exceed the height of the current final block to be + // included in the chain. This addresses both network-delayed messages + // as well as malicious behavior of a chunk producer. + if let Ok(final_head) = self.chain.final_head() { + if witness.chunk_header.height_created() <= final_head.height { + tracing::debug!( + target: "stateless_validation", + chunk_hash=?witness.chunk_header.chunk_hash(), + shard_id=witness.chunk_header.shard_id(), + witness_height=witness.chunk_header.height_created(), + final_height=final_head.height, + "Skipping state witness below the last final block", + ); + return Ok(()); + } } - let prev_block_hash = witness.inner.chunk_header.prev_block_hash(); - let prev_block = match self.chain.get_block(prev_block_hash) { - Ok(block) => block, + match self.chain.get_block(witness.chunk_header.prev_block_hash()) { + Ok(block) => self.process_chunk_state_witness_with_prev_block( + witness, + &block, + processing_done_tracker, + ), Err(Error::DBNotFoundErr(_)) => { // Previous block isn't available at the moment, add this witness to the orphan pool. - self.handle_orphan_state_witness(witness)?; - return Ok(()); + self.handle_orphan_state_witness(witness, raw_witness_size)?; + Ok(()) } - Err(err) => return Err(err), - }; - - self.process_chunk_state_witness_with_prev_block( - witness, - &prev_block, - processing_done_tracker, - ) + Err(err) => Err(err), + } } - fn send_state_witness_ack(&self, witness: &ChunkStateWitness) -> Result<(), Error> { - // First find the AccountId for the chunk producer and then send the ack to that account. 
- let chunk_header = &witness.inner.chunk_header; - let prev_block_hash = chunk_header.prev_block_hash(); - let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(prev_block_hash)?; - let chunk_producer = self.epoch_manager.get_chunk_producer( - &epoch_id, - chunk_header.height_created(), - chunk_header.shard_id(), - )?; - + fn send_state_witness_ack(&self, witness: &ChunkStateWitness) { self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests( NetworkRequests::ChunkStateWitnessAck( - chunk_producer, - ChunkStateWitnessAck::new(&witness), + witness.chunk_producer.clone(), + ChunkStateWitnessAck::new(witness), ), )); - - Ok(()) } pub fn process_chunk_state_witness_with_prev_block( @@ -693,14 +681,80 @@ impl Client { prev_block: &Block, processing_done_tracker: Option, ) -> Result<(), Error> { - if witness.inner.chunk_header.prev_block_hash() != prev_block.hash() { + if witness.chunk_header.prev_block_hash() != prev_block.hash() { return Err(Error::Other(format!( "process_chunk_state_witness_with_prev_block - prev_block doesn't match ({} != {})", - witness.inner.chunk_header.prev_block_hash(), + witness.chunk_header.prev_block_hash(), prev_block.hash() ))); } self.chunk_validator.start_validating_chunk(witness, &self.chain, processing_done_tracker) } + + /// Performs state witness decoding and partial validation without requiring the previous block. + /// Here we rely on epoch_id provided as part of the state witness. Later we verify that this + /// epoch_id actually corresponds to the chunk's previous block. + fn partially_validate_state_witness( + &self, + signed_witness: &SignedEncodedChunkStateWitness, + ) -> Result<(ChunkStateWitness, ChunkStateWitnessSize), Error> { + let decode_start = std::time::Instant::now(); + let (witness, raw_witness_size) = signed_witness.witness_bytes.decode()?; + let decode_elapsed_seconds = decode_start.elapsed().as_secs_f64(); + let chunk_header = &witness.chunk_header; + let witness_height = chunk_header.height_created(); + let witness_shard = chunk_header.shard_id(); + + if !self + .epoch_manager + .get_shard_layout(&witness.epoch_id)? + .shard_ids() + .contains(&witness_shard) + { + return Err(Error::InvalidChunkStateWitness(format!( + "Invalid shard_id in ChunkStateWitness: {}", + witness_shard + ))); + } + + let chunk_producer = self.epoch_manager.get_chunk_producer( + &witness.epoch_id, + witness_height, + witness_shard, + )?; + if witness.chunk_producer != chunk_producer { + return Err(Error::InvalidChunkStateWitness(format!( + "Incorrect chunk producer for epoch {:?} at height {}: expected {}, got {}", + witness.epoch_id, witness_height, chunk_producer, witness.chunk_producer, + ))); + } + + // Reject witnesses for chunks for which this node isn't a validator. + // It's an error, as chunk producer shouldn't send the witness to a non-validator node. + let my_signer = self.chunk_validator.my_signer.as_ref().ok_or(Error::NotAValidator)?; + let chunk_validator_assignments = self.epoch_manager.get_chunk_validator_assignments( + &witness.epoch_id, + witness_shard, + witness_height, + )?; + if !chunk_validator_assignments.contains(my_signer.validator_id()) { + return Err(Error::NotAChunkValidator); + } + + if !self.epoch_manager.verify_chunk_state_witness_signature( + &signed_witness, + &witness.chunk_producer, + &witness.epoch_id, + )? 
{ + return Err(Error::InvalidChunkStateWitness("Invalid signature".to_string())); + } + + // Record metrics after validating the witness + metrics::CHUNK_STATE_WITNESS_DECODE_TIME + .with_label_values(&[&witness_shard.to_string()]) + .observe(decode_elapsed_seconds); + + Ok((witness, raw_witness_size)) + } } diff --git a/chain/client/src/stateless_validation/chunk_validator/orphan_witness_handling.rs b/chain/client/src/stateless_validation/chunk_validator/orphan_witness_handling.rs index 5e711b35fd3..aaa696b59c9 100644 --- a/chain/client/src/stateless_validation/chunk_validator/orphan_witness_handling.rs +++ b/chain/client/src/stateless_validation/chunk_validator/orphan_witness_handling.rs @@ -6,7 +6,6 @@ //! arrives, all witnesses that were waiting for it can be processed. use crate::Client; -use itertools::Itertools; use near_chain::Block; use near_chain_primitives::Error; use near_primitives::stateless_validation::ChunkStateWitness; @@ -24,8 +23,9 @@ impl Client { pub fn handle_orphan_state_witness( &mut self, witness: ChunkStateWitness, + witness_size: usize, ) -> Result { - let chunk_header = &witness.inner.chunk_header; + let chunk_header = &witness.chunk_header; let witness_height = chunk_header.height_created(); let witness_shard = chunk_header.shard_id(); @@ -53,7 +53,6 @@ impl Client { } // Don't save orphaned state witnesses which are bigger than the allowed limit. - let witness_size = borsh::to_vec(&witness)?.len(); let witness_size_u64: u64 = witness_size.try_into().map_err(|_| { Error::Other(format!("Cannot convert witness size to u64: {}", witness_size)) })?; @@ -77,31 +76,8 @@ impl Client { let possible_epochs = self.epoch_manager.possible_epochs_of_height_around_tip(&chain_head, witness_height)?; - // Try to validate the witness assuming that it resides in one of the possible epochs. - // The witness must pass validation in one of these epochs before it can be admitted to the pool. - let mut epoch_validation_result: Option> = None; - for epoch_id in possible_epochs { - match self.partially_validate_orphan_witness_in_epoch(&witness, &epoch_id) { - Ok(()) => { - epoch_validation_result = Some(Ok(())); - break; - } - Err(err) => epoch_validation_result = Some(Err(err)), - } - } - match epoch_validation_result { - Some(Ok(())) => {} // Validation passed in one of the possible epochs, witness can be added to the pool. - Some(Err(err)) => { - // Validation failed in all possible epochs, reject the witness - return Err(err); - } - None => { - // possible_epochs was empty. This shouldn't happen as all epochs around the chain head are known. 
- return Err(Error::Other(format!( - "Couldn't find any matching EpochId for orphan chunk state witness with height {}", - witness_height - ))); - } + if !possible_epochs.contains(&witness.epoch_id) { + return Ok(HandleOrphanWitnessOutcome::UnsupportedEpochId(witness.epoch_id)); } // Orphan witness is OK, save it to the pool @@ -110,45 +86,6 @@ impl Client { Ok(HandleOrphanWitnessOutcome::SavedToPool) } - fn partially_validate_orphan_witness_in_epoch( - &self, - witness: &ChunkStateWitness, - epoch_id: &EpochId, - ) -> Result<(), Error> { - let chunk_header = &witness.inner.chunk_header; - let witness_height = chunk_header.height_created(); - let witness_shard = chunk_header.shard_id(); - - // Validate shard_id - if !self.epoch_manager.get_shard_layout(&epoch_id)?.shard_ids().contains(&witness_shard) { - return Err(Error::InvalidChunkStateWitness(format!( - "Invalid shard_id in ChunkStateWitness: {}", - witness_shard - ))); - } - - // Reject witnesses for chunks for which which this node isn't a validator. - // It's an error, as the sender shouldn't send the witness to a non-validator node. - let Some(my_signer) = self.chunk_validator.my_signer.as_ref() else { - return Err(Error::NotAValidator); - }; - let chunk_validator_assignments = self.epoch_manager.get_chunk_validator_assignments( - &epoch_id, - witness_shard, - witness_height, - )?; - if !chunk_validator_assignments.contains(my_signer.validator_id()) { - return Err(Error::NotAChunkValidator); - } - - // Verify signature - if !self.epoch_manager.verify_chunk_state_witness_signature_in_epoch(&witness, &epoch_id)? { - return Err(Error::InvalidChunkStateWitness("Invalid signature".to_string())); - } - - Ok(()) - } - /// Once a new block arrives, we can process the orphaned chunk state witnesses that were waiting /// for this block. This function takes the ready witnesses out of the orhan pool and process them. /// It also removes old witnesses (below final height) from the orphan pool to save memory. @@ -158,7 +95,7 @@ impl Client { .orphan_witness_pool .take_state_witnesses_waiting_for_block(new_block.hash()); for witness in ready_witnesses { - let header = &witness.inner.chunk_header; + let header = &witness.chunk_header; tracing::debug!( target: "client", witness_height = header.height_created(), @@ -201,9 +138,10 @@ impl Client { /// of other reasons. In such cases the handler function returns Ok(outcome) to let the caller /// know what happened with the witness. /// It's useful in tests. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum HandleOrphanWitnessOutcome { SavedToPool, TooBig(usize), TooFarFromHead { head_height: BlockHeight, witness_height: BlockHeight }, + UnsupportedEpochId(EpochId), } diff --git a/chain/client/src/stateless_validation/chunk_validator/orphan_witness_pool.rs b/chain/client/src/stateless_validation/chunk_validator/orphan_witness_pool.rs index dbcd34073ac..cbd9b48356d 100644 --- a/chain/client/src/stateless_validation/chunk_validator/orphan_witness_pool.rs +++ b/chain/client/src/stateless_validation/chunk_validator/orphan_witness_pool.rs @@ -42,13 +42,13 @@ impl OrphanStateWitnessPool { /// `witness_size` is only used for metrics, it's okay to pass 0 if you don't care about the metrics. 
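
For context on `add_orphan_state_witness` below: the orphan pool is an LRU cache keyed by `(shard_id, height_created)`, whose `push` returns any ejected entry so the eviction can be logged. A minimal sketch of that shape, assuming the `lru` crate's 0.12-style API and a stand-in `Witness` type in place of `ChunkStateWitness`:

```rust
use lru::LruCache;
use std::num::NonZeroUsize;

type ShardId = u64;
type BlockHeight = u64;

#[derive(Debug)]
struct Witness {
    prev_block_hash: [u8; 32],
}

struct OrphanWitnessPool {
    cache: LruCache<(ShardId, BlockHeight), Witness>,
}

impl OrphanWitnessPool {
    fn new(capacity: usize) -> Self {
        Self { cache: LruCache::new(NonZeroUsize::new(capacity).unwrap()) }
    }

    /// Insert a witness; at most one witness per (shard, height) is kept,
    /// and the least recently used entry is ejected once at capacity.
    fn add(&mut self, shard_id: ShardId, height: BlockHeight, witness: Witness) {
        if let Some((ejected_key, _ejected)) = self.cache.push((shard_id, height), witness) {
            // The real code logs the ejected witness's shard and height here.
            let _ = ejected_key;
        }
    }

    /// Drain every witness that was waiting for `prev_block_hash` to arrive.
    fn take_waiting_for_block(&mut self, prev_block_hash: &[u8; 32]) -> Vec<Witness> {
        let keys: Vec<_> = self
            .cache
            .iter()
            .filter(|(_, w)| &w.prev_block_hash == prev_block_hash)
            .map(|(k, _)| *k)
            .collect();
        keys.into_iter().filter_map(|k| self.cache.pop(&k)).collect()
    }
}

fn main() {
    let mut pool = OrphanWitnessPool::new(2);
    pool.add(0, 100, Witness { prev_block_hash: [1; 32] });
    pool.add(1, 100, Witness { prev_block_hash: [1; 32] });
    pool.add(0, 101, Witness { prev_block_hash: [2; 32] }); // ejects the LRU entry
    assert_eq!(pool.take_waiting_for_block(&[2; 32]).len(), 1);
}
```
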
pub fn add_orphan_state_witness(&mut self, witness: ChunkStateWitness, witness_size: usize) { // Insert the new ChunkStateWitness into the cache - let chunk_header = &witness.inner.chunk_header; + let chunk_header = &witness.chunk_header; let cache_key = (chunk_header.shard_id(), chunk_header.height_created()); let metrics_tracker = OrphanWitnessMetricsTracker::new(&witness, witness_size); let cache_entry = CacheEntry { witness, _metrics_tracker: metrics_tracker }; if let Some((_, ejected_entry)) = self.witness_cache.push(cache_key, cache_entry) { // Another witness has been ejected from the cache due to capacity limit - let header = &ejected_entry.witness.inner.chunk_header; + let header = &ejected_entry.witness.chunk_header; tracing::debug!( target: "client", ejected_witness_height = header.height_created(), @@ -68,7 +68,7 @@ impl OrphanStateWitnessPool { ) -> Vec { let mut to_remove: Vec<(ShardId, BlockHeight)> = Vec::new(); for (cache_key, cache_entry) in self.witness_cache.iter() { - if cache_entry.witness.inner.chunk_header.prev_block_hash() == prev_block { + if cache_entry.witness.chunk_header.prev_block_hash() == prev_block { to_remove.push(*cache_key); } } @@ -91,7 +91,7 @@ impl OrphanStateWitnessPool { for ((witness_shard, witness_height), cache_entry) in self.witness_cache.iter() { if *witness_height < final_height { to_remove.push((*witness_shard, *witness_height)); - let header = &cache_entry.witness.inner.chunk_header; + let header = &cache_entry.witness.chunk_header; tracing::debug!( target: "client", final_height, @@ -136,7 +136,7 @@ mod metrics_tracker { witness: &ChunkStateWitness, witness_size: usize, ) -> OrphanWitnessMetricsTracker { - let shard_id = witness.inner.chunk_header.shard_id().to_string(); + let shard_id = witness.chunk_header.shard_id().to_string(); metrics::ORPHAN_CHUNK_STATE_WITNESSES_TOTAL_COUNT .with_label_values(&[shard_id.as_str()]) .inc(); @@ -188,7 +188,7 @@ mod tests { encoded_length: u64, ) -> ChunkStateWitness { let mut witness = ChunkStateWitness::new_dummy(height, shard_id, prev_block_hash); - match &mut witness.inner.chunk_header { + match &mut witness.chunk_header { ShardChunkHeader::V3(header) => match &mut header.inner { ShardChunkHeaderInner::V2(inner) => inner.encoded_length = encoded_length, _ => unimplemented!(), @@ -214,7 +214,7 @@ mod tests { expected.sort_by(sort_comparator); if observed != expected { let print_witness_info = |witness: &ChunkStateWitness| { - let header = &witness.inner.chunk_header; + let header = &witness.chunk_header; eprintln!( "- height = {}, shard_id = {}, encoded_length: {} prev_block: {}", header.height_created(), diff --git a/chain/client/src/stateless_validation/shadow_validate.rs b/chain/client/src/stateless_validation/shadow_validate.rs index 51500e9540c..5b214e9b3d5 100644 --- a/chain/client/src/stateless_validation/shadow_validate.rs +++ b/chain/client/src/stateless_validation/shadow_validate.rs @@ -4,6 +4,7 @@ use near_chain::types::{RuntimeStorageConfig, StorageDataSource}; use near_chain::{Block, BlockHeader}; use near_chain_primitives::Error; use near_primitives::sharding::{ShardChunk, ShardChunkHeader}; +use near_primitives::stateless_validation::EncodedChunkStateWitness; use crate::stateless_validation::chunk_validator::{ pre_validate_chunk_state_witness, validate_chunk_state_witness, validate_prepared_transactions, @@ -57,7 +58,6 @@ impl Client { use_flat_storage: true, source: StorageDataSource::Db, state_patch: Default::default(), - record_storage: true, }; // We call 
`validate_prepared_transactions()` here because we need storage proof for transactions validation. @@ -74,16 +74,34 @@ impl Client { )); }; - let witness = self.create_state_witness_inner( + let witness = self.create_state_witness( + // Setting arbitrary chunk producer is OK for shadow validation + "alice.near".parse().unwrap(), prev_block_header, prev_chunk_header, chunk, validated_transactions.storage_proof, )?; - let witness_size = borsh::to_vec(&witness)?.len(); - metrics::CHUNK_STATE_WITNESS_TOTAL_SIZE - .with_label_values(&[&shard_id.to_string()]) - .observe(witness_size as f64); + let (encoded_witness, raw_witness_size) = { + let shard_id_label = shard_id.to_string(); + let encode_timer = metrics::CHUNK_STATE_WITNESS_ENCODE_TIME + .with_label_values(&[shard_id_label.as_str()]) + .start_timer(); + let (encoded_witness, raw_witness_size) = EncodedChunkStateWitness::encode(&witness)?; + encode_timer.observe_duration(); + metrics::CHUNK_STATE_WITNESS_TOTAL_SIZE + .with_label_values(&[shard_id_label.as_str()]) + .observe(encoded_witness.size_bytes() as f64); + metrics::CHUNK_STATE_WITNESS_RAW_SIZE + .with_label_values(&[shard_id_label.as_str()]) + .observe(raw_witness_size as f64); + let decode_timer = metrics::CHUNK_STATE_WITNESS_DECODE_TIME + .with_label_values(&[shard_id_label.as_str()]) + .start_timer(); + encoded_witness.decode()?; + decode_timer.observe_duration(); + (encoded_witness, raw_witness_size) + }; let pre_validation_start = Instant::now(); let pre_validation_result = pre_validate_chunk_state_witness( &witness, @@ -95,7 +113,8 @@ impl Client { target: "stateless_validation", shard_id, ?chunk_hash, - witness_size, + witness_size = encoded_witness.size_bytes(), + raw_witness_size, pre_validation_elapsed = ?pre_validation_start.elapsed(), "completed shadow chunk pre-validation" ); diff --git a/chain/client/src/stateless_validation/state_witness_producer.rs b/chain/client/src/stateless_validation/state_witness_producer.rs index 87cc3785c5d..6750d1bfd2e 100644 --- a/chain/client/src/stateless_validation/state_witness_producer.rs +++ b/chain/client/src/stateless_validation/state_witness_producer.rs @@ -10,10 +10,11 @@ use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::Receipt; use near_primitives::sharding::{ChunkHash, ReceiptProof, ShardChunk, ShardChunkHeader}; use near_primitives::stateless_validation::{ - ChunkStateTransition, ChunkStateWitness, ChunkStateWitnessAck, ChunkStateWitnessInner, - StoredChunkStateTransitionData, + ChunkStateTransition, ChunkStateWitness, ChunkStateWitnessAck, EncodedChunkStateWitness, + SignedEncodedChunkStateWitness, StoredChunkStateTransitionData, }; -use near_primitives::types::EpochId; +use near_primitives::types::{AccountId, EpochId}; +use near_primitives::validator_signer::ValidatorSigner; use std::collections::HashMap; use crate::stateless_validation::chunk_validator::send_chunk_endorsement_to_block_producers; @@ -46,19 +47,14 @@ impl Client { .ordered_chunk_validators(); let my_signer = self.validator_signer.as_ref().ok_or(Error::NotAValidator)?.clone(); - let (witness, witness_size) = { - let witness_inner = self.create_state_witness_inner( - prev_block_header, - prev_chunk_header, - chunk, - transactions_storage_proof, - )?; - let (signature, witness_size) = my_signer.sign_chunk_state_witness(&witness_inner); - metrics::CHUNK_STATE_WITNESS_TOTAL_SIZE - .with_label_values(&[&chunk_header.shard_id().to_string()]) - .observe(witness_size as f64); - (ChunkStateWitness { inner: witness_inner, signature }, 
witness_size) - }; + let witness = self.create_state_witness( + my_signer.validator_id().clone(), + prev_block_header, + prev_chunk_header, + chunk, + transactions_storage_proof, + )?; + let signed_witness = create_signed_witness(&witness, my_signer.as_ref())?; if chunk_validators.contains(my_signer.validator_id()) { // Bypass state witness validation if we created state witness. Endorse the chunk immediately. @@ -84,12 +80,12 @@ impl Client { // See process_chunk_state_witness_ack for the handling of the ack messages. self.state_witness_tracker.record_witness_sent( &witness, - witness_size, + signed_witness.witness_bytes.size_bytes(), chunk_validators.len(), ); self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests( - NetworkRequests::ChunkStateWitness(chunk_validators, witness), + NetworkRequests::ChunkStateWitness(chunk_validators, signed_witness), )); Ok(()) } @@ -103,14 +99,17 @@ impl Client { self.state_witness_tracker.on_witness_ack_received(witness_ack); } - pub(crate) fn create_state_witness_inner( + pub(crate) fn create_state_witness( &mut self, + chunk_producer: AccountId, prev_block_header: &BlockHeader, prev_chunk_header: &ShardChunkHeader, chunk: &ShardChunk, transactions_storage_proof: Option, - ) -> Result { + ) -> Result { let chunk_header = chunk.cloned_header(); + let epoch_id = + self.epoch_manager.get_epoch_id_from_prev_block(chunk_header.prev_block_hash())?; let prev_chunk = self.chain.get_chunk(&prev_chunk_header.chunk_hash())?; let (main_state_transition, implicit_transitions, applied_receipts_hash) = self.collect_state_transition_data(&chunk_header, prev_chunk_header)?; @@ -131,8 +130,10 @@ impl Client { let source_receipt_proofs = self.collect_source_receipt_proofs(prev_block_header, prev_chunk_header)?; - let witness_inner = ChunkStateWitnessInner::new( - chunk_header.clone(), + let witness = ChunkStateWitness::new( + chunk_producer, + epoch_id, + chunk_header, main_state_transition, source_receipt_proofs, // (Could also be derived from iterating through the receipts, but @@ -144,7 +145,7 @@ impl Client { new_transactions, new_transactions_validation_state, ); - Ok(witness_inner) + Ok(witness) } /// Collect state transition data necessary to produce state witness for @@ -299,3 +300,26 @@ impl Client { Ok(source_receipt_proofs) } } + +fn create_signed_witness( + witness: &ChunkStateWitness, + my_signer: &dyn ValidatorSigner, +) -> Result { + let shard_id_label = witness.chunk_header.shard_id().to_string(); + let encode_timer = metrics::CHUNK_STATE_WITNESS_ENCODE_TIME + .with_label_values(&[shard_id_label.as_str()]) + .start_timer(); + let (witness_bytes, raw_witness_size) = EncodedChunkStateWitness::encode(&witness)?; + encode_timer.observe_duration(); + let signed_witness = SignedEncodedChunkStateWitness { + signature: my_signer.sign_chunk_state_witness(&witness_bytes), + witness_bytes, + }; + metrics::CHUNK_STATE_WITNESS_TOTAL_SIZE + .with_label_values(&[shard_id_label.as_str()]) + .observe(signed_witness.witness_bytes.size_bytes() as f64); + metrics::CHUNK_STATE_WITNESS_RAW_SIZE + .with_label_values(&[shard_id_label.as_str()]) + .observe(raw_witness_size as f64); + Ok(signed_witness) +} diff --git a/chain/client/src/stateless_validation/state_witness_tracker.rs b/chain/client/src/stateless_validation/state_witness_tracker.rs index 5be1af88505..3c4a45cbdb9 100644 --- a/chain/client/src/stateless_validation/state_witness_tracker.rs +++ b/chain/client/src/stateless_validation/state_witness_tracker.rs @@ -24,7 +24,7 @@ struct ChunkStateWitnessKey { 
impl ChunkStateWitnessKey { pub fn new(witness: &ChunkStateWitness) -> Self { - Self { chunk_hash: witness.inner.chunk_header.chunk_hash() } + Self { chunk_hash: witness.chunk_header.chunk_hash() } } } diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index 3236d10aca0..0272258939d 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -29,7 +29,7 @@ use futures::{future, FutureExt}; use near_async::futures::{FutureSpawner, FutureSpawnerExt}; use near_async::messaging::SendAsync; use near_async::time::{Clock, Duration, Utc}; -use near_chain::chain::ApplyStatePartsRequest; +use near_chain::chain::{ApplyStatePartsRequest, LoadMemtrieRequest}; use near_chain::near_chain_primitives; use near_chain::resharding::ReshardingRequest; use near_chain::types::RuntimeAdapter; @@ -147,6 +147,9 @@ pub struct StateSync { /// Maps shard_id to result of applying downloaded state. state_parts_apply_results: HashMap>, + /// Maps shard_id to result of loading in-memory trie. + load_memtrie_results: HashMap>, + /// Maps shard_id to result of splitting state for resharding. resharding_state_roots: HashMap, near_chain::Error>>, @@ -215,6 +218,7 @@ impl StateSync { network_adapter, timeout, state_parts_apply_results: HashMap::new(), + load_memtrie_results: HashMap::new(), resharding_state_roots: HashMap::new(), state_parts_mpsc_rx: rx, state_parts_mpsc_tx: tx, @@ -234,6 +238,7 @@ impl StateSync { tracking_shards: Vec, now: Utc, state_parts_task_scheduler: &near_async::messaging::Sender, + load_memtrie_scheduler: &near_async::messaging::Sender, resharding_scheduler: &near_async::messaging::Sender, state_parts_future_spawner: &dyn FutureSpawner, use_colour: bool, @@ -281,8 +286,8 @@ impl StateSync { download_timeout = res.0; run_shard_state_download = res.1; } - ShardSyncStatus::StateDownloadScheduling => { - self.sync_shards_download_scheduling_status( + ShardSyncStatus::StateApplyScheduling => { + self.sync_shards_apply_scheduling_status( shard_id, shard_sync_download, sync_hash, @@ -291,18 +296,24 @@ impl StateSync { state_parts_task_scheduler, )?; } - ShardSyncStatus::StateDownloadApplying => { - self.sync_shards_download_applying_status( + ShardSyncStatus::StateApplyComplete => { + self.sync_shards_apply_complete_status( shard_id, shard_sync_download, sync_hash, chain, - now, + load_memtrie_scheduler, )?; } - ShardSyncStatus::StateDownloadComplete => { - shard_sync_done = self - .sync_shards_download_complete_status(need_to_reshard, shard_sync_download); + ShardSyncStatus::StateApplyFinalizing => { + shard_sync_done = self.sync_shards_apply_finalizing_status( + shard_uid, + chain, + sync_hash, + now, + need_to_reshard, + shard_sync_download, + )?; } ShardSyncStatus::ReshardingScheduling => { debug_assert!(need_to_reshard); @@ -453,6 +464,15 @@ impl StateSync { self.resharding_state_roots.insert(shard_id, result); } + // Called by the client actor, when it finished loading memtrie. + pub fn set_load_memtrie_result( + &mut self, + shard_uid: ShardUId, + result: Result<(), near_chain::Error>, + ) { + self.load_memtrie_results.insert(shard_uid, result); + } + /// Find the hash of the first block on the same epoch (and chain) of block with hash `sync_hash`. pub fn get_epoch_start_sync_hash( chain: &Chain, @@ -738,6 +758,7 @@ impl StateSync { // Shards to sync. 
tracking_shards: Vec, state_parts_task_scheduler: &near_async::messaging::Sender, + load_memtrie_scheduler: &near_async::messaging::Sender, resharding_scheduler: &near_async::messaging::Sender, state_parts_future_spawner: &dyn FutureSpawner, use_colour: bool, @@ -767,6 +788,7 @@ impl StateSync { tracking_shards, now, state_parts_task_scheduler, + load_memtrie_scheduler, resharding_scheduler, state_parts_future_spawner, use_colour, @@ -949,13 +971,13 @@ impl StateSync { if parts_done { *shard_sync_download = ShardSyncDownload { downloads: vec![], - status: ShardSyncStatus::StateDownloadScheduling, + status: ShardSyncStatus::StateApplyScheduling, }; } (download_timeout, run_shard_state_download) } - fn sync_shards_download_scheduling_status( + fn sync_shards_apply_scheduling_status( &mut self, shard_id: ShardId, shard_sync_download: &mut ShardSyncDownload, @@ -978,7 +1000,7 @@ impl StateSync { Ok(()) => { *shard_sync_download = ShardSyncDownload { downloads: vec![], - status: ShardSyncStatus::StateDownloadApplying, + status: ShardSyncStatus::StateApplyComplete, } } Err(err) => { @@ -993,25 +1015,79 @@ impl StateSync { Ok(()) } - fn sync_shards_download_applying_status( + fn sync_shards_apply_complete_status( &mut self, shard_id: ShardId, shard_sync_download: &mut ShardSyncDownload, sync_hash: CryptoHash, chain: &mut Chain, - now: Utc, + load_memtrie_scheduler: &near_async::messaging::Sender, ) -> Result<(), near_chain::Error> { // Keep waiting until our shard is on the list of results // (these are set via callback from ClientActor - both for sync and catchup). if let Some(result) = self.state_parts_apply_results.remove(&shard_id) { - match chain.set_state_finalize(shard_id, sync_hash, result) { - Ok(()) => { - *shard_sync_download = ShardSyncDownload { - downloads: vec![], - status: ShardSyncStatus::StateDownloadComplete, + result?; + let epoch_id = chain.get_block_header(&sync_hash)?.epoch_id().clone(); + let shard_uid = chain.epoch_manager.shard_id_to_uid(shard_id, &epoch_id)?; + let shard_state_header = chain.get_state_header(shard_id, sync_hash)?; + let chunk = shard_state_header.cloned_chunk(); + let block_hash = chunk.prev_block(); + + // We synced the shard state on top of the _previous_ block for the chunk in the shard state header and + // applied the state parts to flat storage. Now we can set the flat head to the hash of this block and + // create the flat storage. If block_hash is equal to the default value, we are all the way back at + // genesis, so we don't have to add the storage state for the shard in that case. + // TODO(8438) - add additional test scenarios for this case. + if *block_hash != CryptoHash::default() { + chain.create_flat_storage_for_shard(shard_uid, &chunk)?; + } + // We schedule the memtrie load once the flat storage state (if any) is ready. + // It is possible that memtrie is not enabled for that shard, + // in which case the task finishes immediately with an Ok() status. + // We require the task result before proceeding further with state sync.
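(For orientation before the scheduling call that follows, here is how the renamed statuses now chain together; a minimal sketch with a simplified stand-in enum, not the actual `ShardSyncStatus` from near_chain:)

enum ShardSyncStatus {
    StateDownloadParts,   // downloading state parts from peers
    StateApplyScheduling, // all parts downloaded; schedule the apply task
    StateApplyComplete,   // parts applied; create flat storage and schedule the memtrie load
    StateApplyFinalizing, // waiting for the memtrie load result before finalizing
    ReshardingScheduling, // only entered when the shard layout changes this epoch
    StateSyncDone,
}

fn main() {
    // Happy path without resharding:
    let flow = [
        ShardSyncStatus::StateDownloadParts,
        ShardSyncStatus::StateApplyScheduling,
        ShardSyncStatus::StateApplyComplete,
        ShardSyncStatus::StateApplyFinalizing,
        ShardSyncStatus::StateSyncDone,
    ];
    assert_eq!(flow.len(), 5);
}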
+ chain.schedule_load_memtrie(shard_uid, sync_hash, &chunk, load_memtrie_scheduler); + *shard_sync_download = ShardSyncDownload { + downloads: vec![], + status: ShardSyncStatus::StateApplyFinalizing, + } + } + Ok(()) + } + + fn sync_shards_apply_finalizing_status( + &mut self, + shard_uid: ShardUId, + chain: &mut Chain, + sync_hash: CryptoHash, + now: Utc, + need_to_reshard: bool, + shard_sync_download: &mut ShardSyncDownload, + ) -> Result { + // Keep waiting until our shard is on the list of results + // (these are set via callback from ClientActor - both for sync and catchup). + let mut shard_sync_done = false; + if let Some(result) = self.load_memtrie_results.remove(&shard_uid) { + let shard_id = shard_uid.shard_id(); + result + .and_then(|_| { + chain.set_state_finalize(shard_id, sync_hash)?; + // If the shard layout is changing in this epoch - we have to apply it right now. + if need_to_reshard { + *shard_sync_download = ShardSyncDownload { + downloads: vec![], + status: ShardSyncStatus::ReshardingScheduling, + }; + } else { + // If there is no layout change - we're done. + *shard_sync_download = ShardSyncDownload { + downloads: vec![], + status: ShardSyncStatus::StateSyncDone, + }; + shard_sync_done = true; } - } - Err(err) => { + Ok(()) + }) + .or_else(|err| -> Result<(), near_chain::Error> { // Cannot finalize the downloaded state. // The reasonable behavior here is to start from the very beginning. metrics::STATE_SYNC_DISCARD_PARTS @@ -1022,30 +1098,10 @@ impl StateSync { let shard_state_header = chain.get_state_header(shard_id, sync_hash)?; let state_num_parts = shard_state_header.num_state_parts(); chain.clear_downloaded_parts(shard_id, sync_hash, state_num_parts)?; - } - } - } - Ok(()) - } - - fn sync_shards_download_complete_status( - &mut self, - need_to_reshard: bool, - shard_sync_download: &mut ShardSyncDownload, - ) -> bool { - // If the shard layout is changing in this epoch - we have to apply it right now. - if need_to_reshard { - *shard_sync_download = ShardSyncDownload { - downloads: vec![], - status: ShardSyncStatus::ReshardingScheduling, - }; - false - } else { - // If there is no layout change - we're done. 
- *shard_sync_download = - ShardSyncDownload { downloads: vec![], status: ShardSyncStatus::StateSyncDone }; - true + Ok(()) + })?; } + Ok(shard_sync_done) } fn sync_shards_resharding_scheduling_status( @@ -1515,6 +1571,7 @@ mod test { vec![0], &noop().into_sender(), &noop().into_sender(), + &noop().into_sender(), &ActixArbiterHandleFutureSpawner(Arbiter::new().handle()), false, runtime, diff --git a/chain/client/src/sync_jobs_actions.rs b/chain/client/src/sync_jobs_actions.rs index fde3c77f61c..916f2f4074f 100644 --- a/chain/client/src/sync_jobs_actions.rs +++ b/chain/client/src/sync_jobs_actions.rs @@ -4,7 +4,7 @@ use near_async::time::Duration; use near_async::{MultiSend, MultiSendMessage, MultiSenderFrom}; use near_chain::chain::{ do_apply_chunks, ApplyStatePartsRequest, ApplyStatePartsResponse, BlockCatchUpRequest, - BlockCatchUpResponse, + BlockCatchUpResponse, LoadMemtrieRequest, LoadMemtrieResponse, }; use near_chain::resharding::{ReshardingRequest, ReshardingResponse}; use near_chain::Chain; @@ -19,6 +19,7 @@ pub struct ClientSenderForSyncJobs { apply_state_parts_response: Sender, block_catch_up_response: Sender, resharding_response: Sender, + load_memtrie_response: Sender, } #[derive(Clone, MultiSend, MultiSenderFrom, MultiSendMessage)] @@ -80,9 +81,23 @@ impl SyncJobsActions { Ok(success) } + /// This call is synchronous and handled in `sync_jobs_actor`. + pub fn handle_load_memtrie_request(&mut self, msg: LoadMemtrieRequest) { + let result = msg + .runtime_adapter + .get_tries() + .load_mem_trie_on_catchup(&msg.shard_uid, &msg.prev_state_root) + .map_err(|error| error.into()); + self.client_sender.send(LoadMemtrieResponse { + load_result: result, + shard_uid: msg.shard_uid, + sync_hash: msg.sync_hash, + }); + } + pub fn handle_apply_state_parts_request(&mut self, msg: ApplyStatePartsRequest) { // Unload mem-trie (in case it is still loaded) before we apply state parts. 
- msg.runtime_adapter.unload_mem_trie(&msg.shard_uid); + msg.runtime_adapter.get_tries().unload_mem_trie(&msg.shard_uid); let shard_id = msg.shard_uid.shard_id as ShardId; match self.clear_flat_state(&msg) { diff --git a/chain/client/src/sync_jobs_actor.rs b/chain/client/src/sync_jobs_actor.rs index b00343a38b3..ab8d19b71ae 100644 --- a/chain/client/src/sync_jobs_actor.rs +++ b/chain/client/src/sync_jobs_actor.rs @@ -1,6 +1,6 @@ use crate::sync_jobs_actions::SyncJobsActions; use near_async::futures::ActixFutureSpawner; -use near_chain::chain::{ApplyStatePartsRequest, BlockCatchUpRequest}; +use near_chain::chain::{ApplyStatePartsRequest, BlockCatchUpRequest, LoadMemtrieRequest}; use near_chain::resharding::ReshardingRequest; use near_o11y::{handler_debug_span, WithSpanContext}; use near_performance_metrics_macros::perf; @@ -17,6 +17,20 @@ impl actix::Actor for SyncJobsActor { type Context = actix::Context; } +impl actix::Handler> for SyncJobsActor { + type Result = (); + + #[perf] + fn handle( + &mut self, + msg: WithSpanContext, + _: &mut Self::Context, + ) -> Self::Result { + let (_span, msg) = handler_debug_span!(target: "client", msg); + self.actions.handle_load_memtrie_request(msg); + } +} + impl actix::Handler> for SyncJobsActor { type Result = (); diff --git a/chain/client/src/test_utils/client.rs b/chain/client/src/test_utils/client.rs index 579e0176512..fd5b457de16 100644 --- a/chain/client/src/test_utils/client.rs +++ b/chain/client/src/test_utils/client.rs @@ -20,7 +20,8 @@ use near_network::types::HighestHeightPeerInfo; use near_primitives::block::Block; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{merklize, PartialMerkleTree}; -use near_primitives::sharding::{EncodedShardChunk, ReedSolomonWrapper, ShardChunk}; +use near_primitives::reed_solomon::ReedSolomonWrapper; +use near_primitives::sharding::{EncodedShardChunk, ShardChunk}; use near_primitives::stateless_validation::ChunkEndorsement; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{BlockHeight, ShardId}; @@ -287,6 +288,7 @@ pub fn run_catchup( client.run_catchup( highest_height_peers, &noop().into_sender(), + &noop().into_sender(), &block_catch_up, &resharding, Arc::new(|_| {}), diff --git a/chain/client/src/test_utils/client_actions_test_utils.rs b/chain/client/src/test_utils/client_actions_test_utils.rs index 0b584c40648..a893ace755b 100644 --- a/chain/client/src/test_utils/client_actions_test_utils.rs +++ b/chain/client/src/test_utils/client_actions_test_utils.rs @@ -6,7 +6,7 @@ use near_network::client::ClientSenderForNetworkMessage; pub fn forward_client_messages_from_network_to_client_actions( ) -> LoopEventHandler { - LoopEventHandler::new(|msg, client_actions: &mut ClientActions, _| { + LoopEventHandler::new(|msg, client_actions: &mut ClientActions| { match msg { ClientSenderForNetworkMessage::_state_response(msg) => { (msg.callback)(Ok(client_actions.handle(msg.message))); @@ -61,6 +61,7 @@ pub fn forward_client_messages_from_sync_jobs_to_client_actions( } ClientSenderForSyncJobsMessage::_block_catch_up_response(msg) => client_actions.handle(msg), ClientSenderForSyncJobsMessage::_resharding_response(msg) => client_actions.handle(msg), + ClientSenderForSyncJobsMessage::_load_memtrie_response(msg) => client_actions.handle(msg), }) } diff --git a/chain/client/src/test_utils/sync_jobs_test_utils.rs b/chain/client/src/test_utils/sync_jobs_test_utils.rs index 20fbedc66c5..726397bcdda 100644 --- a/chain/client/src/test_utils/sync_jobs_test_utils.rs +++ 
b/chain/client/src/test_utils/sync_jobs_test_utils.rs @@ -16,5 +16,8 @@ pub fn forward_sync_jobs_messages_from_client_to_sync_jobs_actions( SyncJobsSenderForClientMessage::_resharding(msg) => { sync_jobs_actions.handle_resharding_request(msg, &future_spawner); } + SyncJobsSenderForClientMessage::_load_memtrie(msg) => { + sync_jobs_actions.handle_load_memtrie_request(msg); + } }) } diff --git a/chain/client/src/test_utils/test_env.rs b/chain/client/src/test_utils/test_env.rs index 68a5d79a659..ecac26758de 100644 --- a/chain/client/src/test_utils/test_env.rs +++ b/chain/client/src/test_utils/test_env.rs @@ -27,12 +27,11 @@ use near_primitives::epoch_manager::RngSeed; use near_primitives::errors::InvalidTxError; use near_primitives::hash::CryptoHash; use near_primitives::sharding::{ChunkHash, PartialEncodedChunk}; -use near_primitives::stateless_validation::{ChunkEndorsement, ChunkStateWitness}; +use near_primitives::stateless_validation::{ChunkEndorsement, SignedEncodedChunkStateWitness}; use near_primitives::test_utils::create_test_signer; use near_primitives::transaction::{Action, FunctionCallAction, SignedTransaction}; use near_primitives::types::{AccountId, Balance, BlockHeight, EpochId, NumSeats, ShardId}; use near_primitives::utils::MaybeValidated; -use near_primitives::version::ProtocolVersion; use near_primitives::views::{ AccountView, FinalExecutionOutcomeView, QueryRequest, QueryResponse, QueryResponseKind, StateItem, @@ -293,14 +292,11 @@ impl TestEnv { } fn found_differing_post_state_root_due_to_state_transitions( - chunk_state_witness: &ChunkStateWitness, + signed_witness: &SignedEncodedChunkStateWitness, ) -> bool { - let chunk_state_witness_inner = &chunk_state_witness.inner; - let mut post_state_roots = - HashSet::from([chunk_state_witness_inner.main_state_transition.post_state_root]); - post_state_roots.extend( - chunk_state_witness_inner.implicit_transitions.iter().map(|t| t.post_state_root), - ); + let witness = signed_witness.witness_bytes.decode().unwrap().0; + let mut post_state_roots = HashSet::from([witness.main_state_transition.post_state_root]); + post_state_roots.extend(witness.implicit_transitions.iter().map(|t| t.post_state_root)); post_state_roots.len() >= 2 } @@ -436,9 +432,11 @@ impl TestEnv { self.clients[id].process_tx(tx, false, false) } - /// This function will actually bump to the latest protocol version instead of the provided one. - /// See https://github.com/near/nearcore/issues/8590 for details. - pub fn upgrade_protocol(&mut self, protocol_version: ProtocolVersion) { + /// This function used to be able to upgrade to a specific protocol version + /// but due to https://github.com/near/nearcore/issues/8590 that + /// functionality does not work currently. Hence it is renamed to upgrade + /// to the latest version. 
+ pub fn upgrade_protocol_to_latest_version(&mut self) { assert_eq!(self.clients.len(), 1, "at the moment, this support only a single client"); let tip = self.clients[0].chain.head().unwrap(); @@ -450,8 +448,6 @@ impl TestEnv { self.clients[0].epoch_manager.get_block_producer(&epoch_id, tip.height).unwrap(); let mut block = self.clients[0].produce_block(tip.height + 1).unwrap().unwrap(); - eprintln!("Producing block with version {protocol_version}"); - block.mut_header().set_latest_protocol_version(protocol_version); block.mut_header().resign(&create_test_signer(block_producer.as_str())); let _ = self.clients[0] diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs index e3e76b16369..3ab648a94dd 100644 --- a/chain/client/src/view_client.rs +++ b/chain/client/src/view_client.rs @@ -62,7 +62,7 @@ use near_primitives::views::{ use near_store::flat::{FlatStorageReadyStatus, FlatStorageStatus}; use near_store::{DBCol, COLD_HEAD_KEY, FINAL_HEAD_KEY, HEAD_KEY}; use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, VecDeque}; +use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; use std::hash::Hash; use std::sync::{Arc, Mutex, RwLock}; use tracing::{error, info, warn}; @@ -436,28 +436,57 @@ impl ViewClientActor { return Ok(TxExecutionStatus::None); } + let mut awaiting_receipt_ids: HashSet<&CryptoHash> = + HashSet::from_iter(&execution_outcome.transaction_outcome.outcome.receipt_ids); + awaiting_receipt_ids.extend( + execution_outcome + .receipts_outcome + .iter() + .flat_map(|outcome| &outcome.outcome.receipt_ids), + ); + + // refund receipt == last receipt in outcome.receipt_ids + let mut awaiting_non_refund_receipt_ids: HashSet<&CryptoHash> = + HashSet::from_iter(&execution_outcome.transaction_outcome.outcome.receipt_ids); + awaiting_non_refund_receipt_ids.extend(execution_outcome.receipts_outcome.iter().flat_map( + |outcome| { + outcome.outcome.receipt_ids.split_last().map(|(_, ids)| ids).unwrap_or_else(|| &[]) + }, + )); + + let executed_receipt_ids: HashSet<&CryptoHash> = execution_outcome + .receipts_outcome + .iter() + .filter_map(|outcome| { + if outcome.outcome.status == ExecutionStatusView::Unknown { + None + } else { + Some(&outcome.id) + } + }) + .collect(); + + let executed_ignoring_refunds = + awaiting_non_refund_receipt_ids.is_subset(&executed_receipt_ids); + let executed_including_refunds = awaiting_receipt_ids.is_subset(&executed_receipt_ids); + if let Err(_) = self.chain.check_blocks_final_and_canonical(&[self .chain .get_block_header(&execution_outcome.transaction_outcome.block_hash)?]) { - return if execution_outcome - .receipts_outcome - .iter() - .all(|e| e.outcome.status != ExecutionStatusView::Unknown) - { + return if executed_ignoring_refunds { Ok(TxExecutionStatus::ExecutedOptimistic) } else { Ok(TxExecutionStatus::Included) }; } - if execution_outcome - .receipts_outcome - .iter() - .any(|e| e.outcome.status == ExecutionStatusView::Unknown) - { + if !executed_ignoring_refunds { return Ok(TxExecutionStatus::IncludedFinal); } + if !executed_including_refunds { + return Ok(TxExecutionStatus::Executed); + } let block_hashes: BTreeSet = execution_outcome.receipts_outcome.iter().map(|e| e.block_hash).collect(); @@ -507,12 +536,12 @@ impl ViewClientActor { target_shard_id, true, ) { - match self.chain.get_final_transaction_result(&tx_hash) { + match self.chain.get_partial_transaction_result(&tx_hash) { Ok(tx_result) => { let status = self.get_tx_execution_status(&tx_result)?; let res = if fetch_receipt { let final_result = - 
self.chain.get_final_transaction_result_with_receipt(tx_result)?; + self.chain.get_transaction_result_with_receipt(tx_result)?; FinalExecutionOutcomeViewEnum::FinalExecutionOutcomeWithReceipt( final_result, ) diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index 40dfd7d7e93..c7168874f24 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -15,7 +15,7 @@ use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::{account_id_to_shard_id, ShardLayout, ShardLayoutError}; use near_primitives::sharding::{ChunkHash, ShardChunkHeader}; use near_primitives::stateless_validation::{ - ChunkEndorsement, ChunkStateWitness, ChunkValidatorAssignments, + ChunkEndorsement, ChunkValidatorAssignments, SignedEncodedChunkStateWitness, }; use near_primitives::types::validator_stake::ValidatorStake; use near_primitives::types::{ @@ -413,12 +413,8 @@ pub trait EpochManagerAdapter: Send + Sync { fn verify_chunk_state_witness_signature( &self, - state_witness: &ChunkStateWitness, - ) -> Result; - - fn verify_chunk_state_witness_signature_in_epoch( - &self, - state_witness: &ChunkStateWitness, + signed_witness: &SignedEncodedChunkStateWitness, + chunk_producer: &AccountId, epoch_id: &EpochId, ) -> Result; @@ -1063,30 +1059,15 @@ impl EpochManagerAdapter for EpochManagerHandle { fn verify_chunk_state_witness_signature( &self, - state_witness: &ChunkStateWitness, - ) -> Result { - let epoch_manager = self.read(); - let chunk_header = &state_witness.inner.chunk_header; - let epoch_id = - epoch_manager.get_epoch_id_from_prev_block(chunk_header.prev_block_hash())?; - self.verify_chunk_state_witness_signature_in_epoch(state_witness, &epoch_id) - } - - fn verify_chunk_state_witness_signature_in_epoch( - &self, - state_witness: &ChunkStateWitness, + signed_witness: &SignedEncodedChunkStateWitness, + chunk_producer: &AccountId, epoch_id: &EpochId, ) -> Result { let epoch_manager = self.read(); - let chunk_header = &state_witness.inner.chunk_header; - let chunk_producer = epoch_manager.get_chunk_producer_info( - &epoch_id, - chunk_header.height_created(), - chunk_header.shard_id(), - )?; - Ok(state_witness + let validator = epoch_manager.get_validator_by_account_id(epoch_id, chunk_producer)?; + Ok(signed_witness .signature - .verify(&borsh::to_vec(&state_witness.inner)?, chunk_producer.public_key())) + .verify(signed_witness.witness_bytes.as_slice(), validator.public_key())) } fn cares_about_shard_from_prev_block( diff --git a/chain/epoch-manager/src/lib.rs b/chain/epoch-manager/src/lib.rs index 7f5162f57af..7f73fd1b583 100644 --- a/chain/epoch-manager/src/lib.rs +++ b/chain/epoch-manager/src/lib.rs @@ -111,6 +111,11 @@ impl EpochInfoProvider for EpochManagerHandle { let epoch_manager = self.read(); epoch_manager.minimum_stake(prev_block_hash) } + + fn chain_id(&self) -> String { + let epoch_manager = self.read(); + epoch_manager.config.chain_id().into() + } } /// Tracks epoch information across different forks, such as validators. 
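The signature change above is the core of the new wire format: the chunk producer signs the compressed, encoded witness bytes, and a validator checks that signature against the producer's key for the epoch before ever decoding the payload. A minimal runnable sketch of that shape, using a toy keyed hash in place of the real near_crypto signature scheme:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in for a real signature, for shape only: a keyed hash.
fn toy_sign(key: u64, bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    key.hash(&mut h);
    bytes.hash(&mut h);
    h.finish()
}

struct SignedEncodedChunkStateWitness {
    signature: u64,         // stand-in for near_crypto::Signature
    witness_bytes: Vec<u8>, // stand-in for the compressed, borsh-encoded witness
}

fn main() {
    let producer_key = 42;
    let witness_bytes = b"encoded ChunkStateWitness".to_vec();
    let signed = SignedEncodedChunkStateWitness {
        signature: toy_sign(producer_key, &witness_bytes),
        witness_bytes,
    };
    // Verification runs over the encoded bytes, before any decoding happens:
    assert_eq!(signed.signature, toy_sign(producer_key, &signed.witness_bytes));
}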
diff --git a/chain/epoch-manager/src/reward_calculator.rs b/chain/epoch-manager/src/reward_calculator.rs index 804592ccc03..865e8bb8bea 100644 --- a/chain/epoch-manager/src/reward_calculator.rs +++ b/chain/epoch-manager/src/reward_calculator.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use num_rational::Rational32; -use primitive_types::U256; +use primitive_types::{U256, U512}; use near_chain_configs::GenesisConfig; use near_primitives::checked_feature; @@ -138,9 +138,9 @@ impl RewardCalculator { // Apply min between 1. and computed uptime. uptime_numer = if uptime_numer > uptime_denum { uptime_denum } else { uptime_numer }; - (U256::from(epoch_validator_reward) * uptime_numer * U256::from(stake) - / uptime_denum - / U256::from(total_stake)) + (U512::from(epoch_validator_reward) * U512::from(uptime_numer) * U512::from(stake) + / U512::from(uptime_denum) + / U512::from(total_stake)) .as_u128() }; res.insert(account_id, reward); diff --git a/chain/epoch-manager/src/tests/mod.rs b/chain/epoch-manager/src/tests/mod.rs index c090c6af9d7..4bd4cf05539 100644 --- a/chain/epoch-manager/src/tests/mod.rs +++ b/chain/epoch-manager/src/tests/mod.rs @@ -18,7 +18,8 @@ use near_primitives::hash::hash; use near_primitives::shard_layout::ShardLayout; use near_primitives::sharding::{ShardChunkHeader, ShardChunkHeaderV3}; use near_primitives::stateless_validation::{ - ChunkStateTransition, ChunkStateWitness, ChunkStateWitnessInner, + ChunkStateTransition, ChunkStateWitness, EncodedChunkStateWitness, + SignedEncodedChunkStateWitness, }; use near_primitives::types::ValidatorKickoutReason::{NotEnoughBlocks, NotEnoughChunks}; use near_primitives::validator_signer::ValidatorSigner; @@ -2904,12 +2905,15 @@ fn test_verify_chunk_state_witness() { // Verify if the test signer has same public key as the chunk validator. let (validator, _) = epoch_manager.get_validator_by_account_id(&epoch_id, &h[0], &account_id).unwrap(); - let signer = Arc::new(create_test_signer("test1")); + let chunk_producer: AccountId = "test1".parse().unwrap(); + let signer = Arc::new(create_test_signer(chunk_producer.as_str())); assert_eq!(signer.public_key(), validator.public_key().clone()); // Build a chunk state witness with arbitrary data. let chunk_header = test_chunk_header(&h, signer.as_ref()); - let witness_inner = ChunkStateWitnessInner::new( + let witness = ChunkStateWitness::new( + chunk_producer.clone(), + epoch_id.clone(), chunk_header, ChunkStateTransition { block_hash: h[0], @@ -2923,21 +2927,29 @@ fn test_verify_chunk_state_witness() { vec![], Default::default(), ); - let signature = signer.sign_chunk_state_witness(&witness_inner).0; - // Check chunk state witness validity. - let mut chunk_state_witness = ChunkStateWitness { inner: witness_inner, signature }; - assert!(epoch_manager.verify_chunk_state_witness_signature(&chunk_state_witness).unwrap()); + let witness_bytes = EncodedChunkStateWitness::encode(&witness).unwrap().0; + let mut chunk_state_witness = SignedEncodedChunkStateWitness { + signature: signer.sign_chunk_state_witness(&witness_bytes), + witness_bytes, + }; + assert!(epoch_manager + .verify_chunk_state_witness_signature(&chunk_state_witness, &chunk_producer, &epoch_id) + .unwrap()); // Check invalid chunk state witness signature. 
chunk_state_witness.signature = Signature::default(); - assert!(!epoch_manager.verify_chunk_state_witness_signature(&chunk_state_witness).unwrap()); + assert!(!epoch_manager + .verify_chunk_state_witness_signature(&chunk_state_witness, &chunk_producer, &epoch_id) + .unwrap()); // Check chunk state witness invalidity when signer is not a chunk validator. let bad_signer = Arc::new(create_test_signer("test2")); chunk_state_witness.signature = - bad_signer.sign_chunk_state_witness(&chunk_state_witness.inner).0; - assert!(!epoch_manager.verify_chunk_state_witness_signature(&chunk_state_witness).unwrap()); + bad_signer.sign_chunk_state_witness(&chunk_state_witness.witness_bytes); + assert!(!epoch_manager + .verify_chunk_state_witness_signature(&chunk_state_witness, &chunk_producer, &epoch_id) + .unwrap()); } /// Simulate the blockchain over a few epochs and verify that possible_epochs_of_height_around_tip() diff --git a/chain/indexer/src/streamer/mod.rs b/chain/indexer/src/streamer/mod.rs index 388639c8ed5..2b10100de63 100644 --- a/chain/indexer/src/streamer/mod.rs +++ b/chain/indexer/src/streamer/mod.rs @@ -136,6 +136,7 @@ pub async fn build_streamer_message( .filter(|tx| tx.transaction.signer_id == tx.transaction.receiver_id) .collect::>(), &block, + protocol_config_view.protocol_version, ) .await?; @@ -322,6 +323,7 @@ async fn find_local_receipt_by_id_in_block( ) -> Result, FailedToFetchData> { let chunks = fetch_block_chunks(&client, &block).await?; + let protocol_config_view = fetch_protocol_config(&client, block.header.hash).await?; let mut shards_outcomes = fetch_outcomes(&client, block.header.hash).await?; for chunk in chunks { @@ -348,6 +350,7 @@ async fn find_local_receipt_by_id_in_block( &runtime_config, vec![&indexer_transaction], &block, + protocol_config_view.protocol_version, ) .await?; diff --git a/chain/indexer/src/streamer/utils.rs b/chain/indexer/src/streamer/utils.rs index 0ec742cb900..15d559b8ac8 100644 --- a/chain/indexer/src/streamer/utils.rs +++ b/chain/indexer/src/streamer/utils.rs @@ -2,6 +2,7 @@ use actix::Addr; use near_indexer_primitives::IndexerTransactionWithOutcome; use near_parameters::RuntimeConfig; +use near_primitives::version::ProtocolVersion; use near_primitives::views; use node_runtime::config::tx_cost; @@ -13,6 +14,7 @@ pub(crate) async fn convert_transactions_sir_into_local_receipts( runtime_config: &RuntimeConfig, txs: Vec<&IndexerTransactionWithOutcome>, block: &views::BlockView, + protocol_version: ProtocolVersion, ) -> Result, FailedToFetchData> { if txs.is_empty() { return Ok(vec![]); @@ -43,6 +45,7 @@ pub(crate) async fn convert_transactions_sir_into_local_receipts( }, prev_block_gas_price, true, + protocol_version, ); views::ReceiptView { predecessor_id: tx.transaction.signer_id.clone(), diff --git a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json index 8547e1f2c6d..40a157a136a 100644 --- a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json +++ b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json @@ -1,5 +1,5 @@ { - "protocol_version": 65, + "protocol_version": 66, "genesis_time": "1970-01-01T00:00:00.000000000Z", "chain_id": "sample", "genesis_height": 0, @@ -70,4 +70,4 @@ "shuffle_shard_assignment_for_chunk_producers": false, "use_production_config": false, "records": [] -} \ No newline at end of file +} diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs index ddedbbd3f84..3dc262911dd 100644 --- a/chain/jsonrpc/src/lib.rs +++ b/chain/jsonrpc/src/lib.rs @@ -711,8 +711,7 @@ 
impl JsonRpcHandler { > { self.send_tx(RpcSendTransactionRequest { signed_transaction: request_data.signed_transaction, - // Will be ignored, broadcast_tx_commit is not aligned with existing enum - wait_until: Default::default(), + wait_until: TxExecutionStatus::ExecutedOptimistic, }) .await } diff --git a/chain/network/src/client.rs b/chain/network/src/client.rs index 064a31b89d8..77b14871345 100644 --- a/chain/network/src/client.rs +++ b/chain/network/src/client.rs @@ -7,7 +7,7 @@ use near_primitives::errors::InvalidTxError; use near_primitives::hash::CryptoHash; use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::stateless_validation::{ - ChunkEndorsement, ChunkStateWitness, ChunkStateWitnessAck, + ChunkEndorsement, ChunkStateWitnessAck, SignedEncodedChunkStateWitness, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{AccountId, EpochId, ShardId}; @@ -116,7 +116,7 @@ pub struct AnnounceAccountRequest(pub Vec<(AnnounceAccount, Option)>); #[derive(actix::Message, Debug, Clone, PartialEq, Eq)] #[rtype(result = "()")] -pub struct ChunkStateWitnessMessage(pub ChunkStateWitness); +pub struct ChunkStateWitnessMessage(pub SignedEncodedChunkStateWitness); #[derive(actix::Message, Debug, Clone, PartialEq, Eq)] #[rtype(result = "()")] diff --git a/chain/network/src/network_protocol/mod.rs b/chain/network/src/network_protocol/mod.rs index 26c88c7951d..8ffe190d25d 100644 --- a/chain/network/src/network_protocol/mod.rs +++ b/chain/network/src/network_protocol/mod.rs @@ -8,8 +8,8 @@ mod proto_conv; mod state_sync; pub use edge::*; use near_primitives::stateless_validation::ChunkEndorsement; -use near_primitives::stateless_validation::ChunkStateWitness; use near_primitives::stateless_validation::ChunkStateWitnessAck; +use near_primitives::stateless_validation::SignedEncodedChunkStateWitness; pub use peer::*; pub use state_sync::*; @@ -53,6 +53,10 @@ use std::fmt::Debug; use std::sync::Arc; use tracing::Span; +/// Send important messages three times. +/// We send these messages multiple times to reduce the chance that they are lost. +const IMPORTANT_MESSAGE_RESENT_COUNT: usize = 3; + #[derive(PartialEq, Eq, Clone, Debug, Hash)] pub struct PeerAddr { pub addr: std::net::SocketAddr, @@ -530,24 +534,22 @@ pub enum RoutedMessageBody { VersionedPartialEncodedChunk(PartialEncodedChunk), _UnusedVersionedStateResponse, PartialEncodedChunkForward(PartialEncodedChunkForwardMsg), - ChunkStateWitness(ChunkStateWitness), + ChunkStateWitness(SignedEncodedChunkStateWitness), ChunkEndorsement(ChunkEndorsement), ChunkStateWitnessAck(ChunkStateWitnessAck), } impl RoutedMessageBody { - // Return whether this message is important. - // In routing logics, we send important messages multiple times to minimize the risk that they are - // lost - pub fn is_important(&self) -> bool { + // Return the number of times this message should be sent. + // In routing logic, we send important messages multiple times to minimize the risk that they are lost. + pub fn message_resend_count(&self) -> usize { match self { // These messages are important because they are critical for block and chunk production, // and lost messages cannot be requested again. RoutedMessageBody::BlockApproval(_) - | RoutedMessageBody::ChunkEndorsement(_) - | RoutedMessageBody::ChunkStateWitness(_) - | RoutedMessageBody::VersionedPartialEncodedChunk(_) => true, - _ => false, + | RoutedMessageBody::VersionedPartialEncodedChunk(_) => IMPORTANT_MESSAGE_RESENT_COUNT, + // Default value is sending just once.
+ _ => 1, } } } diff --git a/chain/network/src/network_protocol/testonly.rs b/chain/network/src/network_protocol/testonly.rs index c6f1d18a680..f25f8872ae5 100644 --- a/chain/network/src/network_protocol/testonly.rs +++ b/chain/network/src/network_protocol/testonly.rs @@ -13,9 +13,9 @@ use near_primitives::challenge::{BlockDoubleSign, Challenge, ChallengeBody}; use near_primitives::hash::CryptoHash; use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::num_rational::Ratio; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::sharding::{ - ChunkHash, EncodedShardChunk, EncodedShardChunkBody, PartialEncodedChunkPart, - ReedSolomonWrapper, ShardChunk, + ChunkHash, EncodedShardChunkBody, PartialEncodedChunkPart, ShardChunk, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{AccountId, BlockHeight, EpochId, StateRoot}; @@ -136,10 +136,6 @@ pub fn make_edge(a: &SecretKey, b: &SecretKey, nonce: u64) -> Edge { Edge::new(ap, bp, nonce, a.sign(hash.as_ref()), b.sign(hash.as_ref())) } -pub fn make_edge_tombstone(a: &SecretKey, b: &SecretKey) -> Edge { - make_edge(a, b, 1).remove_edge(PeerId::new(a.public_key()), &a) -} - pub fn make_routing_table(rng: &mut R) -> RoutingTableUpdate { let signers: Vec<_> = (0..7).map(|_| make_secret_key(rng)).collect(); RoutingTableUpdate { @@ -184,18 +180,17 @@ pub fn make_challenge(rng: &mut R) -> Challenge { // the real thing, since this functionality is not encapsulated in // the production code well enough to reuse it in tests. pub fn make_chunk_parts(chunk: ShardChunk) -> Vec { - let mut rs = ReedSolomonWrapper::new(10, 5); - let (parts, _) = EncodedShardChunk::encode_transaction_receipts( - &mut rs, - chunk.transactions().to_vec(), - &chunk.prev_outgoing_receipts(), - ) - .unwrap(); + let total_shard_count = 10; + let parity_shard_count = 5; + let mut rs = ReedSolomonWrapper::new(total_shard_count, parity_shard_count); + let transaction_receipts = + (chunk.transactions().to_vec(), chunk.prev_outgoing_receipts().to_vec()); + let (parts, _) = rs.encode(transaction_receipts); + let mut content = EncodedShardChunkBody { parts }; - content.reconstruct(&mut rs).unwrap(); let (_, merkle_paths) = content.get_merkle_hash_and_paths(); let mut parts = vec![]; - for ord in 0..rs.total_shard_count() { + for ord in 0..total_shard_count { parts.push(PartialEncodedChunkPart { part_ord: ord as u64, part: content.parts[ord].take().unwrap(), diff --git a/chain/network/src/peer_manager/connection/mod.rs b/chain/network/src/peer_manager/connection/mod.rs index e47747a2dc8..56fb5a82b5d 100644 --- a/chain/network/src/peer_manager/connection/mod.rs +++ b/chain/network/src/peer_manager/connection/mod.rs @@ -45,8 +45,10 @@ impl tcp::Tier { pub(crate) fn is_allowed_routed(self, body: &RoutedMessageBody) -> bool { match body { - RoutedMessageBody::BlockApproval(..) => true, - RoutedMessageBody::VersionedPartialEncodedChunk(..) => true, + RoutedMessageBody::BlockApproval(..) + | RoutedMessageBody::ChunkStateWitness(..) + | RoutedMessageBody::ChunkEndorsement(..) + | RoutedMessageBody::VersionedPartialEncodedChunk(..) => true, _ => self == tcp::Tier::T2, } } diff --git a/chain/network/src/peer_manager/network_state/mod.rs b/chain/network/src/peer_manager/network_state/mod.rs index bb9dfa874c2..0efbd8539d4 100644 --- a/chain/network/src/peer_manager/network_state/mod.rs +++ b/chain/network/src/peer_manager/network_state/mod.rs @@ -42,10 +42,6 @@ mod tier1; /// Limit number of pending Peer actors to avoid OOM. 
pub(crate) const LIMIT_PENDING_PEERS: usize = 60; -/// Send important messages three times. -/// We send these messages multiple times to reduce the chance that they are lost -const IMPORTANT_MESSAGE_RESENT_COUNT: usize = 3; - /// Size of LRU cache size of recent routed messages. /// It should be large enough to detect duplicates (i.e. all messages received during /// production of 1 block should fit). @@ -171,14 +167,11 @@ impl NetworkState { ) -> Self { Self { runtime: Runtime::new(), - graph: Arc::new(crate::routing::Graph::new( - crate::routing::GraphConfig { - node_id: config.node_id(), - prune_unreachable_peers_after: PRUNE_UNREACHABLE_PEERS_AFTER, - prune_edges_after: Some(PRUNE_EDGES_AFTER), - }, - store.clone(), - )), + graph: Arc::new(crate::routing::Graph::new(crate::routing::GraphConfig { + node_id: config.node_id(), + prune_unreachable_peers_after: PRUNE_UNREACHABLE_PEERS_AFTER, + prune_edges_after: Some(PRUNE_EDGES_AFTER), + })), graph_v2: Arc::new(crate::routing::GraphV2::new(crate::routing::GraphConfigV2 { node_id: config.node_id(), prune_edges_after: Some(PRUNE_EDGES_AFTER), @@ -445,9 +438,9 @@ impl NetworkState { tracing::info!(target:"network", err = format!("{:#}", err), "Failed to connect to {peer_info}"); } - if self.peer_store.peer_connection_attempt(&clock, &peer_info.id, result).is_err() { - tracing::error!(target: "network", ?peer_info, "Failed to store connection attempt."); - } + // The peer may not be in the peer store; we try to record the connection attempt but + // ignore any storage errors. + let _ = self.peer_store.peer_connection_attempt(&clock, &peer_info.id, result); if succeeded { return; @@ -557,18 +550,14 @@ impl NetworkState { /// Send message to specific account. /// Return whether the message is sent or not. - /// The message might be sent over TIER1 and/or TIER2 connection depending on the message type. + /// The message might be sent over a TIER1 or a TIER2 connection depending on the message type. pub fn send_message_to_account( &self, clock: &time::Clock, account_id: &AccountId, msg: RoutedMessageBody, ) -> bool { - let mut success = false; let accounts_data = self.accounts_data.load(); - // All TIER1 messages are being sent over both TIER1 and TIER2 connections for now, - // so that we can actually observe the latency/reliability improvements in practice: - // for each message we track over which network tier it arrived faster?
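On the consumer side, the send path no longer special-cases "important" messages with a hard-coded constant; it just loops `message_resend_count()` times, as the hunk below shows. A minimal sketch of the pattern, with hypothetical message kinds standing in for `RoutedMessageBody`:

// Each message kind reports how many times it should be sent.
enum Body {
    BlockApproval,
    PartialEncodedChunk,
    Other,
}

impl Body {
    fn message_resend_count(&self) -> usize {
        match self {
            // Critical for block/chunk production; lost copies cannot be re-requested.
            Body::BlockApproval | Body::PartialEncodedChunk => 3,
            Body::Other => 1,
        }
    }
}

// The caller no longer branches on importance; it just loops.
fn send(body: &Body, mut send_once: impl FnMut() -> bool) -> bool {
    let mut success = false;
    for _ in 0..body.message_resend_count() {
        success |= send_once();
    }
    success
}

fn main() {
    let mut attempts = 0;
    assert!(send(&Body::BlockApproval, || { attempts += 1; true }));
    assert_eq!(attempts, 3);
}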
if tcp::Tier::T1.is_allowed_routed(&msg) { for key in accounts_data.keys_by_id.get(account_id).iter().flat_map(|keys| keys.iter()) { @@ -586,11 +575,10 @@ impl NetworkState { clock, RawRoutedMessage { target: PeerIdOrHash::PeerId(data.peer_id.clone()), - body: msg.clone(), + body: msg, }, )))); - success |= true; - break; + return true; } } @@ -624,14 +612,11 @@ impl NetworkState { return false; }; + let mut success = false; let msg = RawRoutedMessage { target: PeerIdOrHash::PeerId(target), body: msg }; let msg = self.sign_message(clock, msg); - if msg.body.is_important() { - for _ in 0..IMPORTANT_MESSAGE_RESENT_COUNT { - success |= self.send_message_to_peer(clock, tcp::Tier::T2, msg.clone()); - } - } else { - success |= self.send_message_to_peer(clock, tcp::Tier::T2, msg) + for _ in 0..msg.body.message_resend_count() { + success |= self.send_message_to_peer(clock, tcp::Tier::T2, msg.clone()); } success } diff --git a/chain/network/src/peer_manager/tests/routing.rs b/chain/network/src/peer_manager/tests/routing.rs index d107c47691b..7d0ed938758 100644 --- a/chain/network/src/peer_manager/tests/routing.rs +++ b/chain/network/src/peer_manager/tests/routing.rs @@ -2,7 +2,7 @@ use crate::blacklist; use crate::broadcast; use crate::config::NetworkConfig; use crate::network_protocol::testonly as data; -use crate::network_protocol::{Edge, Encoding, Ping, Pong, RoutedMessageBody, RoutingTableUpdate}; +use crate::network_protocol::{Encoding, Ping, Pong, RoutedMessageBody, RoutingTableUpdate}; use crate::peer; use crate::peer::peer_actor::{ ClosingReason, ConnectionClosedEvent, DROP_DUPLICATED_MESSAGES_PERIOD, @@ -12,7 +12,6 @@ use crate::peer_manager::peer_manager_actor::Event as PME; use crate::peer_manager::testonly::start as start_pm; use crate::peer_manager::testonly::Event; use crate::private_actix::RegisterPeerError; -use crate::store; use crate::tcp; use crate::testonly::{abort_on_panic, make_rng, Rng}; use crate::types::PeerMessage; @@ -1013,96 +1012,6 @@ async fn repeated_data_in_sync_routing_table() { } } -/// Awaits for SyncRoutingTable messages until all edges from `want` arrive. -/// Panics if any other edges arrive. -async fn wait_for_edges( - mut events: broadcast::Receiver, - want: &HashSet, -) { - let mut got = HashSet::new(); - tracing::info!(target: "test", "want edges: {:?}",want.iter().map(|e|e.hash()).collect::>()); - while &got != want { - match events.recv().await { - peer::testonly::Event::Network(PME::MessageProcessed( - tcp::Tier::T2, - PeerMessage::SyncRoutingTable(msg), - )) => { - tracing::info!(target: "test", "got edges: {:?}",msg.edges.iter().map(|e|e.hash()).collect::>()); - got.extend(msg.edges); - assert!(want.is_superset(&got), "want: {:#?}, got: {:#?}", want, got); - } - // Ignore other messages. - _ => {} - } - } -} - -// After each handshake a full sync of routing table is performed with the peer. -// After a restart, some edges reside in storage. The node shouldn't broadcast -// edges which it learned about before the restart. 
-#[tokio::test] -async fn no_edge_broadcast_after_restart() { - abort_on_panic(); - let mut rng = make_rng(921853233); - let rng = &mut rng; - let mut clock = time::FakeClock::default(); - let chain = Arc::new(data::Chain::make(&mut clock, rng, 10)); - - let make_edges = |rng: &mut Rng| { - vec![ - data::make_edge(&data::make_secret_key(rng), &data::make_secret_key(rng), 1), - data::make_edge(&data::make_secret_key(rng), &data::make_secret_key(rng), 1), - data::make_edge_tombstone(&data::make_secret_key(rng), &data::make_secret_key(rng)), - ] - }; - - // Create a bunch of fresh unreachable edges, then send all the edges created so far. - let stored_edges = make_edges(rng); - - // We are preparing the initial storage by hand (rather than simulating the restart), - // because semantics of the RoutingTable protocol are very poorly defined, and it - // is hard to write a solid test for it without literally assuming the implementation details. - let store = near_store::db::TestDB::new(); - { - let mut stored_peers = HashSet::new(); - for e in &stored_edges { - stored_peers.insert(e.key().0.clone()); - stored_peers.insert(e.key().1.clone()); - } - let mut store: store::Store = store.clone().into(); - store.push_component(&stored_peers, &stored_edges).unwrap(); - } - - // Start a PeerManager and connect a peer to it. - let pm = - peer_manager::testonly::start(clock.clock(), store, chain.make_config(rng), chain.clone()) - .await; - let peer = pm - .start_inbound(chain.clone(), chain.make_config(rng)) - .await - .handshake(&clock.clock()) - .await; - tracing::info!(target:"test","pm = {}",pm.cfg.node_id()); - tracing::info!(target:"test","peer = {}",peer.cfg.id()); - // Wait for the initial sync, which will contain just 1 edge. - // Only incremental sync are guaranteed to not contain already known edges. - wait_for_edges(peer.events.clone(), &[peer.edge.clone().unwrap()].into()).await; - - let fresh_edges = make_edges(rng); - let mut total_edges = stored_edges.clone(); - total_edges.extend(fresh_edges.iter().cloned()); - let events = peer.events.from_now(); - peer.send(PeerMessage::SyncRoutingTable(RoutingTableUpdate { - edges: total_edges, - accounts: vec![], - })) - .await; - - // Wait for the fresh edges to be broadcasted back. - tracing::info!(target: "test", "wait_for_edges()"); - wait_for_edges(events, &fresh_edges.into_iter().collect()).await; -} - #[tokio::test] async fn square() { abort_on_panic(); diff --git a/chain/network/src/routing/graph/mod.rs b/chain/network/src/routing/graph/mod.rs index 1b47475970d..39190c3dd09 100644 --- a/chain/network/src/routing/graph/mod.rs +++ b/chain/network/src/routing/graph/mod.rs @@ -4,7 +4,6 @@ use crate::network_protocol::{Edge, EdgeState}; use crate::routing::bfs; use crate::routing::routing_table_view::RoutingTableView; use crate::stats::metrics; -use crate::store; use arc_swap::ArcSwap; use near_async::time; use near_primitives::network::PeerId; @@ -47,7 +46,6 @@ struct Inner { edges: im::HashMap, /// Last time a peer was reachable. peer_reachable_at: HashMap, - store: store::Store, } fn has(set: &im::HashMap, edge: &Edge) -> bool { @@ -105,54 +103,8 @@ impl Inner { } } - /// If peer_id is not in memory check if it is on disk in bring it back on memory. - /// - /// Note: here an advanced example, which shows what's happening. - /// Let's say we have a full graph fully connected with nodes `A, B, C, D`. - /// Step 1 ) `A`, `B` get removed. - /// We store edges belonging to `A` and `B`: `, , , , ` - /// into component 1 let's call it `C_1`. 
- /// And mapping from `A` to `C_1`, and from `B` to `C_1` - /// - /// Note that `C`, `D` is still active. - /// - /// Step 2) 'C' gets removed. - /// We stored edges into component 2 `C_2`. - /// And a mapping from `C` to `C_2`. - /// - /// Note that `D` is still active. - /// - /// Step 3) An active edge gets added from `D` to `A`. - /// We will load `C_1` and try to re-add all edges belonging to `C_1`. - /// We will add `, , , , ` - /// - /// Important note: `C_1` also contains an edge from `A` to `C`, though `C` was removed in `C_2`. - /// - 1) We will not load edges belonging to `C_2`, even though we are adding an edges from `A` to deleted `C`. - /// - 2) We will not delete mapping from `C` to `C_2`, because `C` doesn't belong to `C_1`. - /// - 3) Later, `C` will be deleted, because we will figure out it's not reachable. - /// New component `C_3` will be created. - /// And mapping from `C` to `C_2` will be overridden by mapping from `C` to `C_3`. - /// And therefore `C_2` component will become unreachable. - /// TODO(gprusak): this whole algorithm seems to be leaking stuff to storage and never cleaning up. - /// What is the point of it? What does it actually gives us? - fn load_component(&mut self, now: time::Utc, peer_id: PeerId) { - if peer_id == self.config.node_id || self.peer_reachable_at.contains_key(&peer_id) { - return; - } - let edges = match self.store.pop_component(&peer_id) { - Ok(edges) => edges, - Err(e) => { - tracing::warn!("self.store.pop_component({}): {}", peer_id, e); - return; - } - }; - for e in edges { - self.update_edge(now, e); - } - } - /// Prunes peers unreachable since (and their adjacent edges) - /// from the in-mem graph and stores them in DB. + /// from the in-mem graph. fn prune_unreachable_peers(&mut self, unreachable_since: time::Instant) { // Select peers to prune. let mut peers = HashSet::new(); @@ -178,12 +130,7 @@ impl Inner { } // Prune edges from graph. - let edges = self.remove_adjacent_edges(&peers); - - // Store the pruned data in DB. - if let Err(e) = self.store.push_component(&peers, &edges) { - tracing::warn!("self.store.push_component(): {}", e); - } + self.remove_adjacent_edges(&peers); } /// Verifies edges, then adds them to the graph. @@ -204,17 +151,6 @@ impl Inner { // PROTOCOL_VERSION 60 earliest. edges = Edge::deduplicate(edges); - // load the components BEFORE updating the edges. - // so that result doesn't contain edges we already have in storage. - // It is especially important for initial full sync with peers, because - // we broadcast all the returned edges to all connected peers. - let now = clock.now_utc(); - for edge in &edges { - let key = edge.key(); - self.load_component(now, key.0.clone()); - self.load_component(now, key.1.clone()); - } - // Retain only new edges. edges.retain(|e| !has(&self.edges, e)); @@ -231,13 +167,19 @@ impl Inner { }); // Add the verified edges to the graph. - edges.retain(|e| self.update_edge(now, e.clone())); + edges.retain(|e| self.update_edge(clock.now_utc(), e.clone())); (edges, ok) } /// 1. Prunes expired edges. /// 2. Prunes unreachable graph components. /// 3. Recomputes GraphSnapshot. 
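With the storage-backed component mechanism gone, pruning is purely in-memory: unreachable peers and their adjacent edges are simply dropped instead of being pushed to a DB component. A minimal sketch of that behavior, with simplified stand-in types for the peer and edge maps:

use std::collections::{HashMap, HashSet};

// Simplified stand-ins: peers are u32 ids, edges are keyed by their endpoints.
struct Graph {
    edges: HashMap<(u32, u32), u64>,      // (peer_a, peer_b) -> edge nonce
    peer_reachable_at: HashMap<u32, u64>, // peer -> last time it was reachable
}

impl Graph {
    fn prune_unreachable_peers(&mut self, unreachable_since: u64) {
        // Select peers that have been unreachable for too long.
        let peers: HashSet<u32> = self
            .peer_reachable_at
            .iter()
            .filter(|&(_, &t)| t < unreachable_since)
            .map(|(&p, _)| p)
            .collect();
        // Drop them and their adjacent edges; nothing is persisted any more.
        self.peer_reachable_at.retain(|p, _| !peers.contains(p));
        self.edges.retain(|(a, b), _| !peers.contains(a) && !peers.contains(b));
    }
}

fn main() {
    let mut g = Graph {
        edges: HashMap::from([((1, 2), 1), ((2, 3), 1)]),
        peer_reachable_at: HashMap::from([(1, 10), (2, 10), (3, 100)]),
    };
    g.prune_unreachable_peers(50);
    // Edge (2, 3) goes too, because peer 2 was pruned.
    assert!(g.edges.is_empty());
    assert_eq!(g.peer_reachable_at.len(), 1);
}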
+ #[tracing::instrument( + target = "network::routing::graph", + level = "debug", + "GraphInner::update", + skip_all + )] pub fn update( &mut self, clock: &time::Clock, @@ -287,7 +229,7 @@ pub(crate) struct Graph { } impl Graph { - pub fn new(config: GraphConfig, store: store::Store) -> Self { + pub fn new(config: GraphConfig) -> Self { Self { routing_table: RoutingTableView::new(), inner: Arc::new(Mutex::new(Inner { @@ -295,7 +237,6 @@ impl Graph { config, edges: Default::default(), peer_reachable_at: HashMap::new(), - store, })), unreliable_peers: ArcSwap::default(), snapshot: ArcSwap::default(), @@ -337,9 +278,16 @@ impl Graph { // together. let this = self.clone(); let clock = clock.clone(); + let current_span = tracing::Span::current(); self.runtime .handle .spawn_blocking(move || { + let _span = tracing::debug_span!( + target: "network::routing::graph", + parent: current_span, + "Graph::update" + ) + .entered(); let mut inner = this.inner.lock(); let mut new_edges = vec![]; let mut oks = vec![]; diff --git a/chain/network/src/routing/graph/tests.rs b/chain/network/src/routing/graph/tests.rs index c3f9911783d..e96efca66a9 100644 --- a/chain/network/src/routing/graph/tests.rs +++ b/chain/network/src/routing/graph/tests.rs @@ -2,14 +2,12 @@ use super::{Graph, GraphConfig}; use crate::network_protocol::testonly as data; use crate::network_protocol::Edge; use crate::network_protocol::EDGE_MIN_TIMESTAMP_NONCE; -use crate::store; -use crate::store::testonly::Component; use crate::testonly::make_rng; use near_async::time; use near_crypto::SecretKey; use near_o11y::testonly::init_test_logger; use near_primitives::network::PeerId; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; impl Graph { @@ -17,7 +15,7 @@ impl Graph { assert_eq!(vec![true], self.update(clock, vec![edges]).await.1); } - async fn check(&self, want_mem: &[Edge], want_db: &[Component]) { + async fn check(&self, want_mem: &[Edge]) { let got_mem = self.load(); let got_mem: HashMap<_, _> = got_mem.edges.iter().collect(); let mut want_mem_map = HashMap::new(); @@ -27,18 +25,9 @@ impl Graph { } } assert_eq!(got_mem, want_mem_map); - - let got_db: HashSet<_> = - self.inner.lock().store.list_components().into_iter().map(|c| c.normal()).collect(); - let want_db: HashSet<_> = want_db.iter().map(|c| c.clone().normal()).collect(); - assert_eq!(got_db, want_db); } } -fn store() -> store::Store { - store::Store::from(near_store::db::TestDB::new()) -} - fn peer_id(key: &SecretKey) -> PeerId { PeerId::new(key.public_key()) } @@ -54,8 +43,8 @@ async fn empty() { prune_unreachable_peers_after: time::Duration::seconds(3), prune_edges_after: None, }; - let g = Graph::new(cfg, store()); - g.check(&[], &[]).await; + let g = Graph::new(cfg); + g.check(&[]).await; } const SEC: time::Duration = time::Duration::seconds(1); @@ -72,7 +61,7 @@ async fn one_edge() { prune_unreachable_peers_after: time::Duration::seconds(3), prune_edges_after: None, }; - let g = Arc::new(Graph::new(cfg.clone(), store())); + let g = Arc::new(Graph::new(cfg.clone())); let p1 = data::make_secret_key(rng); let e1 = data::make_edge(&node_key, &p1, 1); @@ -81,131 +70,23 @@ async fn one_edge() { tracing::info!(target:"test", "Add an active edge. Update RT with pruning."); // NOOP, since p1 is reachable. 
g.simple_update(&clock.clock(), vec![e1.clone()]).await; - g.check(&[e1.clone()], &[]).await; + g.check(&[e1.clone()]).await; tracing::info!(target:"test", "Override with an inactive edge."); g.simple_update(&clock.clock(), vec![e1v2.clone()]).await; - g.check(&[e1v2.clone()], &[]).await; + g.check(&[e1v2.clone()]).await; tracing::info!(target:"test", "After 2s, simple_update RT with pruning unreachable for 3s."); // NOOP, since p1 is unreachable for 2s. clock.advance(2 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[e1v2.clone()], &[]).await; + g.check(&[e1v2.clone()]).await; tracing::info!(target:"test", "Update RT with pruning unreachable for 1s."); // p1 should be moved to DB. clock.advance(2 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[], &[Component { edges: vec![e1v2.clone()], peers: vec![peer_id(&p1)] }]).await; -} - -#[tokio::test] -async fn load_component() { - init_test_logger(); - let clock = time::FakeClock::default(); - let mut rng = make_rng(87927345); - let rng = &mut rng; - let node_key = data::make_secret_key(rng); - let cfg = GraphConfig { - node_id: peer_id(&node_key), - prune_unreachable_peers_after: time::Duration::seconds(3), - prune_edges_after: None, - }; - let g = Arc::new(Graph::new(cfg.clone(), store())); - - let p1 = data::make_secret_key(rng); - let p2 = data::make_secret_key(rng); - let e1 = data::make_edge_tombstone(&node_key, &p1); - let e2 = data::make_edge_tombstone(&node_key, &p2); - let e3 = data::make_edge(&p1, &p2, 1); - let e1v2 = data::make_edge(&node_key, &p1, e1.nonce() + 1); - - // There is an active edge between p1,p2, but neither is reachable from me(). - // They should be pruned. - g.simple_update(&clock.clock(), vec![e1.clone(), e2.clone(), e3.clone()]).await; - g.check( - &[], - &[Component { - edges: vec![e1.clone(), e2.clone(), e3.clone()], - peers: vec![peer_id(&p1), peer_id(&p2)], - }], - ) - .await; - - // Add an active edge from me() to p1. This should trigger loading the whole component from DB. - g.simple_update(&clock.clock(), vec![e1v2.clone()]).await; - g.check(&[e1v2, e2, e3], &[]).await; -} - -#[tokio::test] -async fn components_nonces_are_tracked_in_storage() { - init_test_logger(); - let clock = time::FakeClock::default(); - let mut rng = make_rng(87927345); - let rng = &mut rng; - let node_key = data::make_secret_key(rng); - let cfg = GraphConfig { - node_id: peer_id(&node_key), - prune_unreachable_peers_after: time::Duration::seconds(3), - prune_edges_after: None, - }; - let store = store(); - let g = Arc::new(Graph::new(cfg.clone(), store.clone())); - - tracing::info!(target:"test", "Add an inactive edge and prune it."); - let p1 = data::make_secret_key(rng); - let e1 = data::make_edge_tombstone(&node_key, &p1); - g.simple_update(&clock.clock(), vec![e1.clone()]).await; - g.check(&[], &[Component { edges: vec![e1.clone()], peers: vec![peer_id(&p1)] }]).await; - - tracing::info!(target:"test", "Add an active unreachable edge, which also should get pruned."); - let p2 = data::make_secret_key(rng); - let p3 = data::make_secret_key(rng); - let e23 = data::make_edge(&p2, &p3, 3); - g.simple_update(&clock.clock(), vec![e23.clone()]).await; - g.check( - &[], - &[ - Component { edges: vec![e1.clone()], peers: vec![peer_id(&p1)] }, - Component { edges: vec![e23.clone()], peers: vec![peer_id(&p2), peer_id(&p3)] }, - ], - ) - .await; - - // Spawn a new graph with the same storage. - // Add another inactive edge and prune it. 
The previously created component shouldn't get - // overwritten, but rather a new one should be created. - // This verifies that the last_component_nonce (which indicates which component IDs have been - // already utilized) is persistently stored in DB. - let g = Arc::new(Graph::new(cfg.clone(), store)); - let p4 = data::make_secret_key(rng); - let e4 = data::make_edge_tombstone(&node_key, &p4); - g.simple_update(&clock.clock(), vec![e4.clone()]).await; - g.check( - &[], - &[ - Component { edges: vec![e1.clone()], peers: vec![peer_id(&p1)] }, - Component { edges: vec![e23.clone()], peers: vec![peer_id(&p2), peer_id(&p3)] }, - Component { edges: vec![e4.clone()], peers: vec![peer_id(&p4)] }, - ], - ) - .await; - - // Add an active edge between unreachable nodes, which will merge 2 components in DB. - let e34 = data::make_edge(&p3, &p4, 1); - g.simple_update(&clock.clock(), vec![e34.clone()]).await; - g.check( - &[], - &[ - Component { edges: vec![e1.clone()], peers: vec![peer_id(&p1)] }, - Component { - edges: vec![e4.clone(), e23.clone(), e34.clone()], - peers: vec![peer_id(&p2), peer_id(&p3), peer_id(&p4)], - }, - ], - ) - .await; + g.check(&[]).await; } fn to_active_nonce(t: time::Utc) -> u64 { @@ -229,7 +110,7 @@ async fn expired_edges() { prune_unreachable_peers_after: time::Duration::hours(100), prune_edges_after: Some(110 * SEC), }; - let g = Arc::new(Graph::new(cfg.clone(), store())); + let g = Arc::new(Graph::new(cfg.clone())); let p1 = data::make_secret_key(rng); let p2 = data::make_secret_key(rng); @@ -242,40 +123,40 @@ async fn expired_edges() { tracing::info!(target:"test", "Add an active edge."); g.simple_update(&clock.clock(), vec![e1.clone(), old_e2.clone()]).await; - g.check(&[e1.clone(), old_e2.clone()], &[]).await; + g.check(&[e1.clone(), old_e2.clone()]).await; tracing::info!(target:"test", "Update RT with pruning."); // e1 should stay - as it is fresh, but old_e2 should be removed. clock.advance(40 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[e1.clone()], &[]).await; + g.check(&[e1.clone()]).await; tracing::info!(target:"test", "Adding 'still old' edge to e2 should fail."); // (as it is older than the last prune_edges_older_than) g.simple_update(&clock.clock(), vec![still_old_e2.clone()]).await; - g.check(&[e1.clone()], &[]).await; + g.check(&[e1.clone()]).await; tracing::info!(target:"test", "But adding the fresh edge should work."); g.simple_update(&clock.clock(), vec![fresh_e2.clone()]).await; - g.check(&[e1.clone(), fresh_e2.clone()], &[]).await; + g.check(&[e1.clone(), fresh_e2.clone()]).await; tracing::info!(target:"test", "Advance so that the edge is 'too old' and should be removed."); clock.advance(100 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[], &[]).await; + g.check(&[]).await; tracing::info!(target:"test", "Let's create a removal edge."); let e1v2 = data::make_edge(&node_key, &p1, to_active_nonce(clock.now_utc())) .remove_edge(peer_id(&p1), &p1); g.simple_update(&clock.clock(), vec![e1v2.clone()]).await; - g.check(&[e1v2.clone()], &[]).await; + g.check(&[e1v2.clone()]).await; // Advance time a bit. The edge should stay. clock.advance(20 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[e1v2.clone()], &[]).await; + g.check(&[e1v2.clone()]).await; // Advance time a lot. The edge should be pruned. 
clock.advance(100 * SEC); g.simple_update(&clock.clock(), vec![]).await; - g.check(&[], &[]).await; + g.check(&[]).await; } diff --git a/chain/network/src/store/mod.rs b/chain/network/src/store/mod.rs index 70a715fdc91..9bf69187e00 100644 --- a/chain/network/src/store/mod.rs +++ b/chain/network/src/store/mod.rs @@ -1,17 +1,12 @@ /// Store module defines atomic DB operations on top of schema module. /// All transactions should be implemented within this module, /// in particular schema::StoreUpdate is not exported. -use crate::network_protocol::Edge; use crate::types::ConnectionInfo; -use near_primitives::network::{AnnounceAccount, PeerId}; +use near_primitives::network::AnnounceAccount; use near_primitives::types::AccountId; -use std::collections::HashSet; use std::sync::Arc; -use tracing::debug; mod schema; -#[cfg(test)] -pub mod testonly; /// Opaque error type representing storage errors. /// @@ -30,19 +25,15 @@ pub(crate) struct Error(schema::Error); #[derive(Clone)] pub(crate) struct Store(schema::Store); -/// Everytime a group of peers becomes unreachable at the same time; We store edges belonging to -/// them in components. We remove all of those edges from memory, and save them to database, -/// If any of them become reachable again, we re-add whole component. -/// -/// To store components, we have following column in the DB. -/// DBCol::LastComponentNonce -> stores component_nonce: u64, which is the lowest nonce that -/// hasn't been used yet. If new component gets created it will use -/// this nonce. -/// DBCol::ComponentEdges -> Mapping from `component_nonce` to list of edges -/// DBCol::PeerComponent -> Mapping from `peer_id` to last component nonce if there -/// exists one it belongs to. impl Store { /// Inserts (account_id,aa) to the AccountAnnouncements column. + #[tracing::instrument( + target = "network::store", + level = "trace", + "Store::set_account_announcement", + skip_all, + fields(%account_id) + )] pub fn set_account_announcement( &mut self, account_id: &AccountId, @@ -60,66 +51,16 @@ impl Store { ) -> Result, Error> { self.0.get::(account_id).map_err(Error) } - - /// Atomically stores a graph component consisting of and - /// to the DB. On completion, all peers are considered members of the new component - /// (even if they were members of a different component so far). - /// The name (even though technically correct) is misleading, because the do - /// NOT have to constitute a CONNECTED component. I'm not fixing that because - /// the whole routing table in the current form is scheduled for deprecation. - pub fn push_component( - &mut self, - peers: &HashSet, - edges: &Vec, - ) -> Result<(), Error> { - debug!(target: "network", "push_component: moving {} peers from memory to DB", peers.len()); - let component = - self.0.get::(&()).map_err(Error)?.unwrap_or(0) + 1; - let mut update = self.0.new_update(); - update.set::(&(), &component); - update.set::(&component, &edges); - for peer_id in peers { - update.set::(peer_id, &component); - } - self.0.commit(update).map_err(Error) - } - - /// Reads and deletes from DB the component that is a member of. - /// Returns Ok(vec![]) if peer_id is not a member of any component. - pub fn pop_component(&mut self, peer_id: &PeerId) -> Result, Error> { - // Fetch the component assigned to the peer. - let component = match self.0.get::(peer_id).map_err(Error)? 
{ - Some(c) => c, - None => return Ok(vec![]), - }; - let edges = - self.0.get::(&component).map_err(Error)?.unwrap_or(vec![]); - let mut update = self.0.new_update(); - update.delete::(&component); - let mut peers_checked = HashSet::new(); - for edge in &edges { - let key = edge.key(); - for peer_id in [&key.0, &key.1] { - if !peers_checked.insert(peer_id.clone()) { - // Store doesn't accept 2 mutations modifying the same row in a single - // transaction, even if they are identical. Therefore tracking peers_checked - // is critical for correctness, rather than just an optimization minimizing - // the number of lookups. - continue; - } - match self.0.get::(&peer_id).map_err(Error)? { - Some(c) if c == component => update.delete::(&peer_id), - _ => {} - } - } - } - self.0.commit(update).map_err(Error)?; - Ok(edges) - } } // ConnectionStore storage. impl Store { + #[tracing::instrument( + target = "network::store", + level = "trace", + "Store::set_recent_outbound_connections", + skip_all + )] pub fn set_recent_outbound_connections( &mut self, recent_outbound_connections: &Vec, diff --git a/chain/network/src/store/schema/mod.rs b/chain/network/src/store/schema/mod.rs index dbc153f1257..bbe650472df 100644 --- a/chain/network/src/store/schema/mod.rs +++ b/chain/network/src/store/schema/mod.rs @@ -11,8 +11,6 @@ use near_store::DBCol; use std::io; use std::sync::Arc; -#[cfg(test)] -mod testonly; #[cfg(test)] mod tests; @@ -228,6 +226,13 @@ impl Store { pub fn new_update(&mut self) -> StoreUpdate { Default::default() } + + #[tracing::instrument( + target = "network::store::schema", + level = "trace", + "Store::commit", + skip_all + )] pub fn commit(&mut self, update: StoreUpdate) -> Result<(), Error> { self.0.write(update.0) } @@ -255,7 +260,7 @@ impl StoreUpdate { pub fn set(&mut self, k: &::T, v: &::T) { self.0.set(C::COL, to_vec::(k), to_vec::(v)) } - pub fn delete(&mut self, k: &::T) { + pub fn _delete(&mut self, k: &::T) { self.0.delete(C::COL, to_vec::(k)) } } diff --git a/chain/network/src/store/schema/testonly.rs b/chain/network/src/store/schema/testonly.rs deleted file mode 100644 index af48754c1f1..00000000000 --- a/chain/network/src/store/schema/testonly.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::store::schema::{Column, Error, Format}; - -impl super::Store { - pub fn iter( - &self, - ) -> impl Iterator::T, ::T), Error>> + '_ - { - debug_assert!(!C::COL.is_rc()); - self.0 - .iter_raw_bytes(C::COL) - .map(|item| item.and_then(|(k, v)| Ok((C::Key::decode(&k)?, C::Value::decode(&v)?)))) - } -} diff --git a/chain/network/src/store/testonly.rs b/chain/network/src/store/testonly.rs deleted file mode 100644 index 4ab7b98705f..00000000000 --- a/chain/network/src/store/testonly.rs +++ /dev/null @@ -1,52 +0,0 @@ -use super::*; -use std::collections::HashMap; - -#[derive(Default, Debug, Clone, Hash, PartialEq, Eq)] -pub struct Component { - pub peers: Vec, - pub edges: Vec, -} - -impl Component { - pub fn normal(mut self) -> Self { - self.peers.sort(); - self.edges.sort_by(|a, b| a.key().cmp(b.key())); - self - } -} - -impl Store { - /// Reads all the components from the database. - /// Panics if any of the invariants has been violated. 
- pub fn list_components(&self) -> Vec { - let edges: HashMap<_, _> = - self.0.iter::().map(|x| x.unwrap()).collect(); - let peers: HashMap<_, _> = - self.0.iter::().map(|x| x.unwrap()).collect(); - let lcn: HashMap<(), _> = - self.0.iter::().map(|x| x.unwrap()).collect(); - // all component nonces should be <= LastComponentNonce - let lcn = lcn.get(&()).unwrap_or(&0); - for (c, _) in &edges { - assert!(c <= lcn); - } - for (_, c) in &peers { - assert!(c <= lcn); - } - // Each edge has to be incident to at least one peer in the same component. - for (c, es) in &edges { - for e in es { - let key = e.key(); - assert!(peers.get(&key.0) == Some(c) || peers.get(&key.1) == Some(c)); - } - } - let mut cs = HashMap::::new(); - for (c, es) in edges { - cs.entry(c).or_default().edges = es; - } - for (p, c) in peers { - cs.entry(c).or_default().peers.push(p); - } - cs.into_iter().map(|(_, v)| v).collect() - } -} diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs index 6341cca3918..db69861001f 100644 --- a/chain/network/src/types.rs +++ b/chain/network/src/types.rs @@ -20,7 +20,7 @@ use near_primitives::hash::CryptoHash; use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::sharding::PartialEncodedChunkWithArcReceipts; use near_primitives::stateless_validation::{ - ChunkEndorsement, ChunkStateWitness, ChunkStateWitnessAck, + ChunkEndorsement, ChunkStateWitnessAck, SignedEncodedChunkStateWitness, }; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{AccountId, BlockHeight, EpochHeight, ShardId}; @@ -259,7 +259,7 @@ pub enum NetworkRequests { /// A challenge to invalidate a block. Challenge(Challenge), /// A chunk's state witness. - ChunkStateWitness(Vec, ChunkStateWitness), + ChunkStateWitness(Vec, SignedEncodedChunkStateWitness), /// Acknowledgement to a chunk's state witness, sent back to the originating chunk producer. ChunkStateWitnessAck(AccountId, ChunkStateWitnessAck), /// Message for a chunk endorsement, sent by a chunk validator to the block producer. diff --git a/core/async/src/examples/actix_component_test.rs b/core/async/src/examples/actix_component_test.rs index a3917305399..e3950776992 100644 --- a/core/async/src/examples/actix_component_test.rs +++ b/core/async/src/examples/actix_component_test.rs @@ -4,9 +4,7 @@ use super::actix_component::{ use crate::futures::FutureSpawnerExt; use crate::messaging::IntoSender; use crate::test_loop::event_handler::{capture_events, LoopEventHandler}; -use crate::test_loop::futures::{ - drive_delayed_action_runners, drive_futures, TestLoopDelayedActionEvent, TestLoopTask, -}; +use crate::test_loop::futures::{drive_futures, TestLoopDelayedActionEvent, TestLoopTask}; use crate::test_loop::TestLoopBuilder; use derive_enum_from_into::{EnumFrom, EnumTryInto}; use std::sync::Arc; @@ -57,7 +55,7 @@ fn test_actix_component() { // test itself is synchronous. test.register_handler(drive_futures().widen()); // This is to allow the ExampleComponent to run delayed actions (timers). - test.register_handler(drive_delayed_action_runners::().widen()); + test.register_delayed_action_handler::(); // This is to capture the periodic requests sent by the ExampleComponent // so we can assert against it. test.register_handler(capture_events::().widen()); @@ -66,7 +64,7 @@ fn test_actix_component() { test.register_handler(example_handler().widen()); // We need to redo whatever the ExampleActor does in its `started` method. 
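    // (Note that the delayed action runner now also takes the loop's shutdown
    // flag, so that pending timers stop rescheduling themselves once the test
    // loop is shut down.)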
- test.data.example.start(&mut test.sender().into_delayed_action_runner()); + test.data.example.start(&mut test.sender().into_delayed_action_runner(test.shutting_down())); // Send some requests; this can be done in the asynchronous context. test.future_spawner().spawn("wait for 5", { let res = test.data.outer.call_example_component_for_response(5); @@ -87,4 +85,6 @@ fn test_actix_component() { test.data.periodic_requests_captured, vec![PeriodicRequest { id: 0 }, PeriodicRequest { id: 1 }, PeriodicRequest { id: 2 },] ); + + test.shutdown_and_drain_remaining_events(Duration::seconds(1)); } diff --git a/core/async/src/examples/mod.rs b/core/async/src/examples/mod.rs index 710c4525f93..5c56dba55d2 100644 --- a/core/async/src/examples/mod.rs +++ b/core/async/src/examples/mod.rs @@ -5,5 +5,3 @@ mod async_component_test; mod multi_instance_test; mod sum_numbers; mod sum_numbers_test; -mod timed_component; -mod timed_component_test; diff --git a/core/async/src/examples/multi_instance_test.rs b/core/async/src/examples/multi_instance_test.rs index c345bd12d01..27ff2d0521b 100644 --- a/core/async/src/examples/multi_instance_test.rs +++ b/core/async/src/examples/multi_instance_test.rs @@ -1,6 +1,7 @@ use crate::time; use derive_enum_from_into::{EnumFrom, EnumTryInto}; +use crate::test_loop::delay_sender::DelaySender; use crate::{ examples::sum_numbers_test::forward_sum_request, messaging::{CanSend, IntoSender}, @@ -27,13 +28,14 @@ enum TestEvent { /// Let's pretend that when we send a remote request, the number gets sent to /// every other instance in the setup as a local request. -fn forward_remote_request_to_other_instances() -> LoopEventHandler, (usize, TestEvent)> -{ - LoopEventHandler::new(|event: (usize, TestEvent), data: &mut Vec, context| { +fn forward_remote_request_to_other_instances( + sender: DelaySender<(usize, TestEvent)>, +) -> LoopEventHandler, (usize, TestEvent)> { + LoopEventHandler::new(move |event: (usize, TestEvent), data: &mut Vec| { if let TestEvent::RemoteRequest(number) = event.1 { for i in 0..data.len() { if i != event.0 { - context.sender.send((i, TestEvent::LocalRequest(SumRequest::Number(number)))) + sender.send((i, TestEvent::LocalRequest(SumRequest::Number(number)))) } } Ok(()) @@ -58,7 +60,7 @@ fn test_multi_instance() { } let sender = builder.sender(); let mut test = builder.build(data); - test.register_handler(forward_remote_request_to_other_instances()); + test.register_handler(forward_remote_request_to_other_instances(test.sender())); for i in 0..5 { // Single-instance handlers can be reused for multi-instance tests. test.register_handler(forward_sum_request().widen().for_index(i)); diff --git a/core/async/src/examples/timed_component.rs b/core/async/src/examples/timed_component.rs deleted file mode 100644 index 373a486af3b..00000000000 --- a/core/async/src/examples/timed_component.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::messaging::Sender; - -pub(crate) struct TimedComponent { - buffered_messages: Vec, - message_sender: Sender>, -} - -/// Mimics a component that has a specific function that is supposed to be -/// triggered by a timer. -impl TimedComponent { - pub fn new(message_sender: Sender>) -> Self { - Self { buffered_messages: vec![], message_sender } - } - - pub fn send_message(&mut self, msg: String) { - self.buffered_messages.push(msg); - } - - /// This is supposed to be triggered by a timer so it flushes the - /// messages every tick. 
- pub fn flush(&mut self) { - if self.buffered_messages.is_empty() { - return; - } - self.message_sender.send(self.buffered_messages.clone()); - self.buffered_messages.clear(); - } -} diff --git a/core/async/src/examples/timed_component_test.rs b/core/async/src/examples/timed_component_test.rs deleted file mode 100644 index 4d6c677a718..00000000000 --- a/core/async/src/examples/timed_component_test.rs +++ /dev/null @@ -1,63 +0,0 @@ -use crate::time; -use derive_enum_from_into::{EnumFrom, EnumTryInto}; - -use crate::{ - messaging::IntoSender, - test_loop::event_handler::{capture_events, interval, LoopEventHandler}, -}; - -use super::timed_component::TimedComponent; - -#[derive(Debug, Clone, PartialEq)] -struct Flush; - -#[derive(Debug, EnumTryInto, EnumFrom)] -enum TestEvent { - SendMessage(String), - Flush(Flush), - MessageSent(Vec), -} - -#[derive(derive_more::AsMut, derive_more::AsRef)] -struct TestData { - component: TimedComponent, - messages_sent: Vec>, -} - -fn forward_send_message() -> LoopEventHandler { - LoopEventHandler::new_simple(|event, data: &mut TimedComponent| { - data.send_message(event); - }) -} - -#[test] -fn test_timed_component() { - let builder = crate::test_loop::TestLoopBuilder::::new(); - let data = TestData { - component: TimedComponent::new(builder.sender().into_sender()), - messages_sent: vec![], - }; - let sender = builder.sender(); - let mut test = builder.build(data); - test.register_handler(forward_send_message().widen()); - test.register_handler( - interval(time::Duration::milliseconds(100), Flush, |data: &mut TimedComponent| { - data.flush() - }) - .widen(), - ); - test.register_handler(capture_events::>().widen()); - - sender.send_with_delay("Hello".to_string().into(), time::Duration::milliseconds(10)); - sender.send_with_delay("World".to_string().into(), time::Duration::milliseconds(20)); - // The timer fires at 100ms here and flushes "Hello" and "World". - sender.send_with_delay("!".to_string().into(), time::Duration::milliseconds(110)); - // The timer fires again at 200ms here and flushes "!"". - // Further timer events do not send messages. - - test.run_for(time::Duration::seconds(1)); - assert_eq!( - test.data.messages_sent, - vec![vec!["Hello".to_string(), "World".to_string()], vec!["!".to_string()]] - ); -} diff --git a/core/async/src/test_loop.rs b/core/async/src/test_loop.rs index 551bf7a4373..cf25fce6e79 100644 --- a/core/async/src/test_loop.rs +++ b/core/async/src/test_loop.rs @@ -63,18 +63,17 @@ pub mod adhoc; pub mod delay_sender; pub mod event_handler; pub mod futures; -pub mod multi_instance; use self::{ delay_sender::DelaySender, event_handler::LoopEventHandler, futures::{TestLoopFutureSpawner, TestLoopTask}, }; -use crate::test_loop::event_handler::LoopHandlerContext; use crate::time; -use crate::time::Duration; +use crate::time::{Clock, Duration}; use near_o11y::{testonly::init_test_logger, tracing::info}; use serde::Serialize; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Mutex; use std::{collections::BinaryHeap, fmt::Debug, sync::Arc}; @@ -109,9 +108,9 @@ pub struct TestLoop { current_time: Duration, /// Fake clock that always returns the virtual time. clock: time::FakeClock, - - /// Handlers are initialized only once, upon the first call to run(). - handlers_initialized: bool, + /// Shutdown flag. When this flag is true, delayed action runners will no + /// longer post any new events to the event loop. + shutting_down: Arc, /// All the event handlers that are registered. 
We invoke them one by one /// for each event, until one of them handles the event (or panic if no one /// handles it). @@ -121,7 +120,7 @@ pub struct TestLoop { /// An event waiting to be executed, ordered by the due time and then by ID. struct EventInHeap { event: Event, - due: time::Duration, + due: Duration, id: usize, } @@ -190,6 +189,7 @@ pub struct TestLoopBuilder { clock: time::FakeClock, pending_events: Arc>>, pending_events_sender: DelaySender, + shutting_down: Arc, } impl TestLoopBuilder { @@ -207,6 +207,7 @@ impl TestLoopBuilder { pending_events_sender: DelaySender::new(move |event, delay| { pending_events.lock().unwrap().add(event, delay); }), + shutting_down: Arc::new(AtomicBool::new(false)), } } @@ -220,8 +221,20 @@ impl TestLoopBuilder { self.clock.clock() } + /// Returns a flag indicating whether the TestLoop system is being shut down; + /// this is similar to whether the Actix system is shutting down. + pub fn shutting_down(&self) -> Arc { + self.shutting_down.clone() + } + pub fn build(self, data: Data) -> TestLoop { - TestLoop::new(self.pending_events, self.pending_events_sender, self.clock, data) + TestLoop::new( + self.pending_events, + self.pending_events_sender, + self.clock, + self.shutting_down, + data, + ) } } @@ -253,6 +266,7 @@ impl TestLoop { pending_events: Arc>>, sender: DelaySender, clock: time::FakeClock, + shutting_down: Arc, data: Data, ) -> Self { Self { @@ -263,7 +277,7 @@ impl TestLoop { next_event_index: 0, current_time: time::Duration::ZERO, clock, - handlers_initialized: false, + shutting_down, handlers: Vec::new(), } } @@ -272,28 +286,22 @@ impl TestLoop { self.sender.clone() } + pub fn clock(&self) -> Clock { + self.clock.clock() + } + + pub fn shutting_down(&self) -> Arc { + self.shutting_down.clone() + } + /// Registers a new event handler to the test loop. pub fn register_handler(&mut self, handler: LoopEventHandler) { - assert!(!self.handlers_initialized, "Cannot register more handlers after run() is called"); self.handlers.push(handler); } - fn maybe_initialize_handlers(&mut self) { - if self.handlers_initialized { - return; - } - for handler in &mut self.handlers { - handler.init(LoopHandlerContext { - sender: self.sender.clone(), - clock: self.clock.clock(), - }); - } - } - /// Helper to push events we have just received into the heap. fn queue_received_events(&mut self) { for event in self.pending_events.lock().unwrap().events.drain(..) { - info!("Queuing new event at index {}: {:?}", self.next_event_index, event.event); self.events.push(EventInHeap { due: self.current_time + event.delay, event: event.event, @@ -303,29 +311,63 @@ impl TestLoop { } } - /// Runs the test loop for the given duration. This function may be called - /// multiple times, but further test handlers may not be registered after - /// the first call. - pub fn run_for(&mut self, duration: time::Duration) { - self.maybe_initialize_handlers(); - // Push events we have received outside the test or during handler init into the heap. - self.queue_received_events(); - let deadline = self.current_time + duration; + /// Performs the logic to find the next event, advance to its time, and dequeue it. + /// Takes a decider to determine whether to advance time, handle the next event, and/or to stop. + fn advance_till_next_event( + &mut self, + decider: &impl Fn(Option, &mut Data) -> AdvanceDecision, + ) -> Option> { loop { - // Don't execute any more events after the deadline. 
-            match self.events.peek() {
-                Some(event) => {
-                    if event.due > deadline {
-                        break;
-                    }
+            // New events may have been sent to the TestLoop from outside, and the previous
+            // iteration of the loop may have made new futures ready, so queue up any received
+            // events.
+            self.queue_received_events();
+
+            // Now there are two ways an event may be/become available. One is that the event is
+            // queued into the event loop at a specific time; the other is that some future is
+            // waiting on our fake clock to advance beyond a specific time. Pick the earliest.
+            let next_timestamp = {
+                let next_event_timestamp = self.events.peek().map(|event| event.due);
+                let next_future_waiter_timestamp = self
+                    .clock
+                    .first_waiter()
+                    .map(|time| time - (self.clock.now() - self.current_time));
+                next_event_timestamp
+                    .map(|t1| next_future_waiter_timestamp.map(|t2| t2.min(t1)).unwrap_or(t1))
+                    .or(next_future_waiter_timestamp)
+            };
+            // If the next event is immediately available (i.e. its time is same as current time),
+            // just return that event; there's no decision to make (as we only give deciders a
+            // chance to stop processing if we would advance the clock) and no need to advance time.
+            if next_timestamp == Some(self.current_time) {
+                let event = self.events.pop().expect("Programming error in TestLoop");
+                assert_eq!(event.due, self.current_time);
+                return Some(event);
+            }
+            // If we reach this point, it means we need to advance the clock. Let the decider choose
+            // if we should do that, or if we should stop.
+            let decision = decider(next_timestamp, &mut self.data);
+            match decision {
+                AdvanceDecision::AdvanceToNextEvent => {
+                    let next_timestamp = next_timestamp.unwrap();
+                    self.clock.advance(next_timestamp - self.current_time);
+                    self.current_time = next_timestamp;
+                    // Run the loop again, because if the reason why we advance the clock to this
+                    // time is due to a possible future waiting on the clock, we may or may not get
+                    // another future queued into the TestLoop, so we just check the whole thing
+                    // again.
+                    continue;
+                }
+                AdvanceDecision::AdvanceToAndStop(target) => {
+                    self.clock.advance(target - self.current_time);
+                    self.current_time = target;
+                    return None;
+                }
+                AdvanceDecision::Stop => {
+                    return None;
+                }
-                }
-                None => break,
             }
-            // Process the event.
-            let event = self.events.pop().unwrap();
-            self.process_event(event);
         }
-        self.current_time = deadline;
     }

     /// Processes the given event, by logging a line first and then finding a handler to run it.
@@ -338,8 +380,7 @@
             })
             .unwrap();
         info!(target: "test_loop", "TEST_LOOP_EVENT_START {}", start_json);
-        self.clock.advance(event.due - self.current_time);
-        self.current_time = event.due;
+        assert_eq!(self.current_time, event.due);

         for handler in &mut self.handlers {
             if let Err(e) = handler.handle(event.event, &mut self.data) {
@@ -359,33 +400,42 @@
         panic!("Unhandled event: {:?}", event.event);
     }

+    /// Runs the test loop for the given duration. This function may be called
+    /// multiple times, and handlers may still be registered between calls (the
+    /// one-time handler initialization step no longer exists).
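+    ///
+    /// Illustrative sketch of typical usage (the event, data and handler types
+    /// here are hypothetical, not part of this crate):
+    ///
+    /// ```ignore
+    /// let mut test = TestLoopBuilder::<MyEvent>::new().build(MyData::default());
+    /// test.register_handler(my_handler().widen());
+    /// test.run_for(Duration::seconds(5));
+    /// ```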
+ pub fn run_for(&mut self, duration: Duration) { + let deadline = self.current_time + duration; + while let Some(event) = self.advance_till_next_event(&|next_time, _| { + if let Some(next_time) = next_time { + if next_time <= deadline { + return AdvanceDecision::AdvanceToNextEvent; + } + } + AdvanceDecision::AdvanceToAndStop(deadline) + }) { + self.process_event(event); + } + } + /// Run until the given condition is true, asserting that it happens before the maximum duration /// is reached. /// /// To maximize logical consistency, the condition is only checked before the clock would /// advance. If it returns true, execution stops before advancing the clock. pub fn run_until(&mut self, condition: impl Fn(&mut Data) -> bool, maximum_duration: Duration) { - self.maybe_initialize_handlers(); - // Push events we have received outside the test or during handler init into the heap. - self.queue_received_events(); let deadline = self.current_time + maximum_duration; - loop { - // Don't execute any more events after the deadline. - match self.events.peek() { - Some(event) => { - if event.due > deadline { - panic!("run_until did not fulfill the condition within the given deadline"); - } - if event.due > self.current_time { - if condition(&mut self.data) { - return; - } - } + let decider = |next_time, data: &mut Data| { + if condition(data) { + return AdvanceDecision::Stop; + } + if let Some(next_time) = next_time { + if next_time <= deadline { + return AdvanceDecision::AdvanceToNextEvent; } - None => break, } - // Process the event. - let event = self.events.pop().unwrap(); + panic!("run_until did not fulfill the condition within the given deadline"); + }; + while let Some(event) = self.advance_till_next_event(&decider) { self.process_event(event); } } @@ -393,41 +443,11 @@ impl TestLoop { /// Used to finish off remaining events that are still in the loop. This can be necessary if the /// destructor of some components wait for certain condition to become true. Otherwise, the /// destructors may end up waiting forever. This also helps avoid a panic when destructing - /// TestLoop itself, as it asserts that all important events have been handled. - /// - /// Note that events that are droppable are dropped and not handled. It would not be consistent - /// to continue using the TestLoop, and therefore it is consumed by this function. - pub fn finish_remaining_events(mut self, maximum_duration: Duration) { - self.maybe_initialize_handlers(); - // Push events we have received outside the test or during handler init into the heap. - self.queue_received_events(); - let max_time = self.current_time + maximum_duration; - 'outer: loop { - // Don't execute any more events after the deadline. - match self.events.peek() { - Some(event) => { - if event.due > max_time { - panic!( - "finish_remaining_events could not finish all events; \ - event still remaining: {:?}", - event.event - ); - } - } - None => break, - } - // Only execute the event if we can't drop it. - let mut event = self.events.pop().unwrap(); - for handler in &self.handlers { - if let Err(e) = handler.try_drop(event.event) { - event.event = e; - } else { - continue 'outer; - } - } - // Process the event. - self.process_event(event); - } + /// TestLoop itself, as it asserts that all events have been handled. 
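+    ///
+    /// A sketch of the intended call pattern at the end of a test (the condition
+    /// and durations are illustrative):
+    ///
+    /// ```ignore
+    /// test.run_until(|data| data.done, Duration::seconds(10));
+    /// test.shutdown_and_drain_remaining_events(Duration::seconds(1));
+    /// ```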
+ pub fn shutdown_and_drain_remaining_events(mut self, maximum_duration: Duration) { + self.shutting_down.store(true, Ordering::Relaxed); + self.run_for(maximum_duration); + // Implicitly dropped here, which asserts that no more events are remaining. } pub fn run_instant(&mut self) { @@ -445,20 +465,81 @@ impl TestLoop { impl Drop for TestLoop { fn drop(&mut self) { self.queue_received_events(); - 'outer: for event in self.events.drain() { - let mut to_handle = event.event; - for handler in &mut self.handlers { - if let Err(e) = handler.try_drop(to_handle) { - to_handle = e; - } else { - continue 'outer; - } - } + if let Some(event) = self.events.pop() { panic!( - "Important event scheduled at {} is not handled at the end of the test: {:?}. - Consider calling `test.run()` again, or with a longer duration.", - event.due, to_handle + "Event scheduled at {} is not handled at the end of the test: {:?}. + Consider calling `test.shutdown_and_drain_remaining_events(...)`.", + event.due, event.event ); } } } + +enum AdvanceDecision { + AdvanceToNextEvent, + AdvanceToAndStop(Duration), + Stop, +} + +#[cfg(test)] +mod tests { + use crate::futures::FutureSpawnerExt; + use crate::test_loop::futures::{drive_futures, TestLoopTask}; + use crate::test_loop::TestLoopBuilder; + use derive_enum_from_into::{EnumFrom, EnumTryInto}; + use derive_more::AsMut; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + use time::Duration; + + #[derive(Debug, EnumFrom, EnumTryInto)] + enum TestEvent { + Task(Arc), + } + + #[derive(AsMut)] + struct TestData { + dummy: (), + } + + // Tests that the TestLoop correctly handles futures that sleep on the fake clock. + #[test] + fn test_futures() { + let builder = TestLoopBuilder::::new(); + let clock = builder.clock(); + let mut test = builder.build::(TestData { dummy: () }); + test.register_handler(drive_futures().widen()); + let start_time = clock.now(); + + let finished = Arc::new(AtomicUsize::new(0)); + + let clock1 = clock.clone(); + let finished1 = finished.clone(); + test.sender().into_future_spawner().spawn("test1", async move { + assert_eq!(clock1.now(), start_time); + clock1.sleep(Duration::seconds(10)).await; + assert_eq!(clock1.now(), start_time + Duration::seconds(10)); + clock1.sleep(Duration::seconds(5)).await; + assert_eq!(clock1.now(), start_time + Duration::seconds(15)); + finished1.fetch_add(1, Ordering::Relaxed); + }); + + test.run_for(Duration::seconds(2)); + + let clock2 = clock; + let finished2 = finished.clone(); + test.sender().into_future_spawner().spawn("test2", async move { + assert_eq!(clock2.now(), start_time + Duration::seconds(2)); + clock2.sleep(Duration::seconds(3)).await; + assert_eq!(clock2.now(), start_time + Duration::seconds(5)); + clock2.sleep(Duration::seconds(20)).await; + assert_eq!(clock2.now(), start_time + Duration::seconds(25)); + finished2.fetch_add(1, Ordering::Relaxed); + }); + // During these 30 virtual seconds, the TestLoop should've automatically advanced the clock + // to wake each future as they become ready to run again. The code inside the futures + // assert that the fake clock does indeed have the expected times. 
+ test.run_for(Duration::seconds(30)); + assert_eq!(finished.load(Ordering::Relaxed), 2); + } +} diff --git a/core/async/src/test_loop/adhoc.rs b/core/async/src/test_loop/adhoc.rs index dee459cffa0..29a3847d645 100644 --- a/core/async/src/test_loop/adhoc.rs +++ b/core/async/src/test_loop/adhoc.rs @@ -52,8 +52,7 @@ impl> + 'static> AdhocEventSender() -> LoopEventHandler> { - LoopEventHandler::new(|event: AdhocEvent, data, _ctx| { + LoopEventHandler::new_simple(|event: AdhocEvent, data| { (event.handler)(data); - Ok(()) }) } diff --git a/core/async/src/test_loop/delay_sender.rs b/core/async/src/test_loop/delay_sender.rs index e5baa3f5009..a3d0dd918b4 100644 --- a/core/async/src/test_loop/delay_sender.rs +++ b/core/async/src/test_loop/delay_sender.rs @@ -7,6 +7,7 @@ use crate::test_loop::futures::{ }; use crate::time; use crate::time::Duration; +use std::sync::atomic::AtomicBool; use std::sync::Arc; use super::futures::{TestLoopFutureSpawner, TestLoopTask}; @@ -70,11 +71,14 @@ impl DelaySender { self.into_sender().break_apart().into_multi_sender() } - pub fn into_delayed_action_runner(self) -> TestLoopDelayedActionRunner + pub fn into_delayed_action_runner( + self, + shutting_down: Arc, + ) -> TestLoopDelayedActionRunner where Event: From> + 'static, { - TestLoopDelayedActionRunner { sender: self.narrow() } + TestLoopDelayedActionRunner { sender: self.narrow(), shutting_down } } /// Returns a FutureSpawner that can be used to spawn futures into the loop. diff --git a/core/async/src/test_loop/event_handler.rs b/core/async/src/test_loop/event_handler.rs index c8f5897ccfe..e9479d02744 100644 --- a/core/async/src/test_loop/event_handler.rs +++ b/core/async/src/test_loop/event_handler.rs @@ -1,81 +1,27 @@ -use super::{delay_sender::DelaySender, multi_instance::IndexedLoopEventHandler}; -use crate::time; - -/// Context given to the loop handler on each call. -pub struct LoopHandlerContext { - /// The sender that can be used to send more messages to the loop. - pub sender: DelaySender, - /// The clock whose .now() returns the current virtual time maintained by - /// the test loop. - pub clock: time::Clock, -} - /// An event handler registered on a test loop. Each event handler usually /// handles only some events, so we will usually have multiple event handlers /// registered to cover all event types. -pub struct LoopEventHandler { - inner: Box>, -} +pub struct LoopEventHandler( + Box Result<(), Event>>, +); impl LoopEventHandler { /// Creates a handler from the handling logic function. The function is /// called on each event. It should return Ok(()) if the event was handled, /// or Err(event) if the event was not handled (which will cause it to be /// passed to the next handler). - pub fn new( - handler: impl FnMut(Event, &mut Data, &LoopHandlerContext) -> Result<(), Event> + 'static, - ) -> Self { - Self { - inner: Box::new(LoopEventHandlerImplByFunction { - initial_event: None, - handler: Box::new(handler), - ok_to_drop: Box::new(|_| false), - context: None, - }), - } + pub fn new(handler: impl FnMut(Event, &mut Data) -> Result<(), Event> + 'static) -> Self { + Self(Box::new(handler)) } - /// Like new(), but the handler function is only given an event and data, - /// without the context, and also without the ability to reject the event. + /// Like new(), but the handler is not given the ability to reject the event. 
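+    ///
+    /// For example (the event and data types here are purely illustrative):
+    ///
+    /// ```ignore
+    /// let handler = LoopEventHandler::new_simple(|event: MyEvent, data: &mut Vec<MyEvent>| {
+    ///     data.push(event);
+    /// });
+    /// ```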
    pub fn new_simple(mut handler: impl FnMut(Event, &mut Data) + 'static) -> Self {
-        Self::new(move |event, data, _| {
+        Self::new(move |event, data| {
             handler(event, data);
             Ok(())
         })
     }

-    pub fn new_with_drop(
-        handler: impl FnMut(Event, &mut Data, &LoopHandlerContext<Event>) -> Result<(), Event> + 'static,
-        ok_to_drop: impl Fn(&Event) -> bool + 'static,
-    ) -> Self {
-        Self {
-            inner: Box::new(LoopEventHandlerImplByFunction {
-                initial_event: None,
-                handler: Box::new(handler),
-                ok_to_drop: Box::new(ok_to_drop),
-                context: None,
-            }),
-        }
-    }
-
-    /// Like new(), but additionally sends an initial event with an initial
-    /// delay. See periodic_interval() for why this is useful.
-    pub fn new_with_initial_event(
-        initial_event: Event,
-        initial_delay: time::Duration,
-        handler: impl FnMut(Event, &mut Data, &LoopHandlerContext<Event>) -> Result<(), Event> + 'static,
-        ok_to_drop: impl Fn(&Event) -> bool + 'static,
-    ) -> Self {
-        Self {
-            inner: Box::new(LoopEventHandlerImplByFunction {
-                initial_event: Some((initial_event, initial_delay)),
-                handler: Box::new(handler),
-                ok_to_drop: Box::new(ok_to_drop),
-                context: None,
-            }),
-        }
-    }
-
     /// Adapts this handler to a handler whose data is a superset of our data
     /// and whose event is a superset of our event.
     /// For data, A is a superset of B if A implements AsRef and AsMut.
@@ -85,78 +31,31 @@
         OuterData: AsMut<Data>,
         OuterEvent: TryIntoOrSelf<Event> + From<Event> + 'static,
     >(
-        self,
+        mut self,
     ) -> LoopEventHandler<OuterData, OuterEvent> {
-        LoopEventHandler { inner: Box::new(WideningEventHandler(self)) }
+        LoopEventHandler(Box::new(move |event, data| {
+            let mut inner_data = data.as_mut();
+            let inner_event = event.try_into_or_self()?;
+            self.0(inner_event, &mut inner_data)?;
+            Ok(())
+        }))
     }

     /// Adapts this handler to a handler whose data is a vector of our data,
-    /// and whose event is the tuple (index, our event), for a specific
+    /// and whose event is the tuple (index, our event), for a specific
     /// index.
-    pub fn for_index(self, index: usize) -> LoopEventHandler<Vec<Data>, (usize, Event)> {
-        LoopEventHandler { inner: Box::new(IndexedLoopEventHandler { inner: self, index }) }
-    }
-
-    pub(crate) fn init(&mut self, context: LoopHandlerContext<Event>) {
-        self.inner.init(context)
+    pub fn for_index(mut self, index: usize) -> LoopEventHandler<Vec<Data>, (usize, Event)> {
+        LoopEventHandler(Box::new(move |event, data| {
+            if event.0 == index {
+                self.0(event.1, &mut data[index]).map_err(|event| (index, event))
+            } else {
+                Err(event)
+            }
+        }))
     }

     pub(crate) fn handle(&mut self, event: Event, data: &mut Data) -> Result<(), Event> {
-        self.inner.handle(event, data)
-    }
-
-    pub(crate) fn try_drop(&self, event: Event) -> Result<(), Event> {
-        self.inner.try_drop(event)
-    }
-}
-
-/// Internal implementation of LoopEventHandler.
-pub(crate) trait LoopEventHandlerImpl<Data, Event> {
-    /// init is called when the test loop runs for the first time.
-    fn init(&mut self, context: LoopHandlerContext<Event>);
-    /// handle is called when we have a pending event from the test loop.
-    fn handle(&mut self, event: Event, data: &mut Data) -> Result<(), Event>;
-    /// try_drop is called when the TestLoop is dropped, but an event
-    /// remains in the event queue. If this handler knows that it's OK to
-    /// drop the event, it should return Ok(()); otherwise it should return
-    /// the original event as an Err.
-    ///
-    /// This is basically used for periodic timers, as it's OK to drop timers,
-    /// but not OK to drop an event that forgot to be handled.
-    fn try_drop(&self, event: Event) -> Result<(), Event>;
-}
-
-/// Implementation of LoopEventHandlerImpl by a closure.
We cache the context -/// upon receiving the init() call, so that we can pass a reference to the -/// closure every time we receive the handle() call. -struct LoopEventHandlerImplByFunction { - initial_event: Option<(Event, time::Duration)>, - handler: Box) -> Result<(), Event>>, - ok_to_drop: Box bool>, - context: Option>, -} - -impl LoopEventHandlerImpl - for LoopEventHandlerImplByFunction -{ - fn init(&mut self, context: LoopHandlerContext) { - if let Some((event, delay)) = self.initial_event.take() { - context.sender.send_with_delay(event, delay); - } - self.context = Some(context); - } - - fn handle(&mut self, event: Event, data: &mut Data) -> Result<(), Event> { - let context = self.context.as_ref().unwrap(); - (self.handler)(event, data, context) - } - - fn try_drop(&self, event: Event) -> Result<(), Event> { - if (self.ok_to_drop)(&event) { - Ok(()) - } else { - Err(event) - } + self.0(event, data) } } @@ -172,34 +71,6 @@ impl> TryIntoOrSelf for T { } } -/// Implements .widen() for an event handler. -struct WideningEventHandler(LoopEventHandler); - -impl< - Data, - Event, - OuterData: AsMut, - OuterEvent: TryIntoOrSelf + From + 'static, - > LoopEventHandlerImpl for WideningEventHandler -{ - fn init(&mut self, context: LoopHandlerContext) { - self.0.init(LoopHandlerContext { sender: context.sender.narrow(), clock: context.clock }) - } - - fn handle(&mut self, event: OuterEvent, data: &mut OuterData) -> Result<(), OuterEvent> { - let mut inner_data = data.as_mut(); - let inner_event = event.try_into_or_self()?; - self.0.handle(inner_event, &mut inner_data)?; - Ok(()) - } - - fn try_drop(&self, event: OuterEvent) -> Result<(), OuterEvent> { - let inner_event = event.try_into_or_self()?; - self.0.try_drop(inner_event)?; - Ok(()) - } -} - /// An event handler that puts the event into a vector in the Data, as long as /// the Data contains a Vec. (Use widen() right after). /// @@ -213,28 +84,3 @@ pub fn capture_events() -> LoopEventHandler, Event> { pub fn ignore_events() -> LoopEventHandler<(), Event> { LoopEventHandler::new_simple(|_, _| {}) } - -/// Periodically sends to the event loop the given event by the given interval. -/// Each time this event is handled, the given function is called. -/// The first invocation is triggered after the interval, not immediately. 
-pub fn interval( - interval: time::Duration, - event: Event, - func: impl Fn(&mut Data) + 'static, -) -> LoopEventHandler { - let event_cloned = event.clone(); - LoopEventHandler::new_with_initial_event( - event.clone(), - interval, - move |actual_event, data, context| { - if actual_event == event { - func(data); - context.sender.send_with_delay(actual_event, interval); - Ok(()) - } else { - Err(actual_event) - } - }, - move |actual_event| actual_event == &event_cloned, - ) -} diff --git a/core/async/src/test_loop/futures.rs b/core/async/src/test_loop/futures.rs index 9282ff7b795..ccf49414943 100644 --- a/core/async/src/test_loop/futures.rs +++ b/core/async/src/test_loop/futures.rs @@ -1,10 +1,12 @@ -use super::{delay_sender::DelaySender, event_handler::LoopEventHandler}; +use super::{delay_sender::DelaySender, event_handler::LoopEventHandler, TestLoop}; use crate::futures::{AsyncComputationSpawner, DelayedActionRunner}; +use crate::test_loop::event_handler::TryIntoOrSelf; use crate::time::Duration; use crate::{futures::FutureSpawner, messaging::CanSend}; use futures::future::BoxFuture; use futures::task::{waker_ref, ArcWake}; use std::fmt::Debug; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::task::Context; @@ -113,27 +115,24 @@ impl Debug for TestLoopDelayedActionEvent { /// An event handler that handles only `TestLoopDelayedActionEvent`s, by /// running the action encapsulated in the event. -pub fn drive_delayed_action_runners() -> LoopEventHandler> { - LoopEventHandler::new_with_drop( - |event, data, ctx| { - let mut runner = TestLoopDelayedActionRunner { sender: ctx.sender.clone() }; - (event.action)(data, &mut runner); - Ok(()) - }, - |_| { - // Delayed actions are usually used for timers, so let's just say - // it's OK to drop them at the end of the test. It would be hard - // to distinguish what sort of delayed action was being scheduled - // anyways. - true - }, - ) +pub fn drive_delayed_action_runners( + sender: DelaySender>, + shutting_down: Arc, +) -> LoopEventHandler> { + LoopEventHandler::new_simple(move |event: TestLoopDelayedActionEvent, data: &mut T| { + let mut runner = TestLoopDelayedActionRunner { + sender: sender.clone(), + shutting_down: shutting_down.clone(), + }; + (event.action)(data, &mut runner); + }) } /// `DelayedActionRunner` that schedules the action to be run later by the /// TestLoop event loop. pub struct TestLoopDelayedActionRunner { pub(crate) sender: DelaySender>, + pub(crate) shutting_down: Arc, } impl DelayedActionRunner for TestLoopDelayedActionRunner { @@ -143,6 +142,9 @@ impl DelayedActionRunner for TestLoopDelayedActionRunner { dur: Duration, action: Box) + Send + 'static>, ) { + if self.shutting_down.load(Ordering::Relaxed) { + return; + } self.sender.send_with_delay( TestLoopDelayedActionEvent { name: name.to_string(), action }, dur.try_into().unwrap(), @@ -150,6 +152,43 @@ impl DelayedActionRunner for TestLoopDelayedActionRunner { } } +impl TestLoop { + /// Shorthand for registering this frequently used handler. + pub fn register_delayed_action_handler(&mut self) + where + T: 'static, + Data: AsMut, + Event: TryIntoOrSelf> + + From> + + 'static, + { + self.register_handler( + drive_delayed_action_runners::(self.sender().narrow(), self.shutting_down()).widen(), + ); + } +} + +impl TestLoop, (usize, Event)> { + /// Shorthand for registering this frequently used handler for a multi-instance test. 
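+    ///
+    /// Illustrative usage (the component type and instance count are hypothetical):
+    ///
+    /// ```ignore
+    /// for idx in 0..num_instances {
+    ///     test.register_delayed_action_handler_for_index::<MyComponent>(idx);
+    /// }
+    /// ```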
+    pub fn register_delayed_action_handler_for_index<T>(&mut self, idx: usize)
+    where
+        T: 'static,
+        Data: AsMut<T>,
+        Event: TryIntoOrSelf<TestLoopDelayedActionEvent<T>>
+            + From<TestLoopDelayedActionEvent<T>>
+            + 'static,
+    {
+        self.register_handler(
+            drive_delayed_action_runners::<T>(
+                self.sender().for_index(idx).narrow(),
+                self.shutting_down(),
+            )
+            .widen()
+            .for_index(idx),
+        );
+    }
+}
+
 /// An event that represents async computation. See async_computation_spawner() in DelaySender.
 pub struct TestLoopAsyncComputationEvent {
     name: String,
diff --git a/core/async/src/test_loop/multi_instance.rs b/core/async/src/test_loop/multi_instance.rs
deleted file mode 100644
index 2a84f2b3b2c..00000000000
--- a/core/async/src/test_loop/multi_instance.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use super::event_handler::{LoopEventHandler, LoopEventHandlerImpl, LoopHandlerContext};
-
-/// Event handler that handles a specific single instance in a multi-instance
-/// setup.
-///
-/// To convert a single-instance handler to a multi-instance handler
-/// (for one instance), use handler.for_index(index).
-pub(crate) struct IndexedLoopEventHandler<Data, Event> {
-    pub(crate) inner: LoopEventHandler<Data, Event>,
-    pub(crate) index: usize,
-}
-
-impl<Data, Event> LoopEventHandlerImpl<Vec<Data>, (usize, Event)>
-    for IndexedLoopEventHandler<Data, Event>
-{
-    fn init(&mut self, context: LoopHandlerContext<(usize, Event)>) {
-        self.inner.init(LoopHandlerContext {
-            sender: context.sender.for_index(self.index),
-            clock: context.clock,
-        })
-    }
-
-    fn handle(
-        &mut self,
-        event: (usize, Event),
-        data: &mut Vec<Data>,
-    ) -> Result<(), (usize, Event)> {
-        if event.0 == self.index {
-            self.inner.handle(event.1, &mut data[self.index]).map_err(|event| (self.index, event))
-        } else {
-            Err(event)
-        }
-    }
-
-    fn try_drop(&self, event: (usize, Event)) -> Result<(), (usize, Event)> {
-        if event.0 == self.index {
-            self.inner.try_drop(event.1).map_err(|event| (self.index, event))
-        } else {
-            Err(event)
-        }
-    }
-}
diff --git a/core/async/src/time.rs b/core/async/src/time.rs
index 4c4762a3d8f..2c61bd3333e 100644
--- a/core/async/src/time.rs
+++ b/core/async/src/time.rs
@@ -21,9 +21,10 @@
 //! of different machines are not perfectly synchronized, and in extreme
 //! cases can be totally skewed.
 use once_cell::sync::Lazy;
+use std::cmp::Ordering;
+use std::collections::BinaryHeap;
 use std::sync::{Arc, Mutex};
 pub use time::error;
-use tokio::sync::watch;

 // TODO: consider wrapping these types to prevent interactions
 // with other time libraries, especially to prevent the direct access
@@ -124,23 +125,46 @@ impl Clock {
 }

 struct FakeClockInner {
-    /// `mono` keeps the current time of the monotonic clock.
-    /// It is wrapped in watch::Sender, so that the value can
-    /// be observed from the clock::sleep() futures.
-    mono: watch::Sender<Instant>,
     utc: Utc,
-    /// We need to keep it so that mono.send() always succeeds.
-    _mono_recv: watch::Receiver<Instant>,
+    instant: Instant,
+    waiters: BinaryHeap<ClockWaiterInHeap>,
+}
+
+/// Whenever a user of a FakeClock calls `sleep` or `sleep_until`, we create a
+/// `ClockWaiterInHeap` so that the returned future can be completed when the
+/// clock advances past the desired deadline.
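+///
+/// Note that `Ord` below is implemented in reverse order (`other` is compared
+/// against `self`), because `std::collections::BinaryHeap` is a max-heap and we
+/// want `peek()` to return the waiter with the *earliest* deadline.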
+struct ClockWaiterInHeap { + deadline: Instant, + waker: tokio::sync::oneshot::Sender<()>, +} + +impl PartialEq for ClockWaiterInHeap { + fn eq(&self, other: &Self) -> bool { + self.deadline == other.deadline + } +} + +impl PartialOrd for ClockWaiterInHeap { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Eq for ClockWaiterInHeap {} + +impl Ord for ClockWaiterInHeap { + fn cmp(&self, other: &Self) -> Ordering { + other.deadline.cmp(&self.deadline) + } } impl FakeClockInner { pub fn new(utc: Utc) -> Self { - let (mono, _mono_recv) = watch::channel(*FAKE_CLOCK_MONO_START); - Self { utc, mono, _mono_recv } + Self { utc, instant: *FAKE_CLOCK_MONO_START, waiters: BinaryHeap::new() } } pub fn now(&mut self) -> Instant { - *self.mono.borrow() + self.instant } pub fn now_utc(&mut self) -> Utc { self.utc @@ -150,17 +174,19 @@ impl FakeClockInner { if d == Duration::ZERO { return; } - let now = *self.mono.borrow(); - self.mono.send(now + d).unwrap(); + self.instant += d; self.utc += d; + while let Some(earliest_waiter) = self.waiters.peek() { + if earliest_waiter.deadline <= self.instant { + self.waiters.pop().unwrap().waker.send(()).ok(); + } else { + break; + } + } } pub fn advance_until(&mut self, t: Instant) { - let now = *self.mono.borrow(); - if t <= now { - return; - } - self.mono.send(t).unwrap(); - self.utc += t - now; + let by = t - self.now(); + self.advance(by); } } @@ -198,19 +224,40 @@ impl FakeClock { /// Cancel-safe. pub async fn sleep(&self, d: Duration) { - let mut watch = self.0.lock().unwrap().mono.subscribe(); - let t = *watch.borrow() + d; - while *watch.borrow() < t { - watch.changed().await.unwrap(); + if d <= Duration::ZERO { + return; } + let receiver = { + let mut inner = self.0.lock().unwrap(); + let (sender, receiver) = tokio::sync::oneshot::channel(); + let waiter = ClockWaiterInHeap { waker: sender, deadline: inner.now() + d }; + inner.waiters.push(waiter); + receiver + }; + receiver.await.unwrap(); } /// Cancel-safe. pub async fn sleep_until(&self, t: Instant) { - let mut watch = self.0.lock().unwrap().mono.subscribe(); - while *watch.borrow() < t { - watch.changed().await.unwrap(); - } + let receiver = { + let mut inner = self.0.lock().unwrap(); + if inner.now() >= t { + return; + } + let (sender, receiver) = tokio::sync::oneshot::channel(); + let waiter = ClockWaiterInHeap { waker: sender, deadline: t }; + inner.waiters.push(waiter); + receiver + }; + receiver.await.unwrap(); + } + + /// Returns the earliest waiter, or None if no one is waiting on the clock. + /// The returned instant is guaranteed to be <= any waiter that is currently + /// waiting on the clock to advance. + pub fn first_waiter(&self) -> Option { + let inner = self.0.lock().unwrap(); + inner.waiters.peek().map(|waiter| waiter.deadline) } } diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index 0f8dcf3b40f..c10f837509d 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -488,7 +488,7 @@ impl ClientConfig { assert!( archive || save_trie_changes, "Configuration with archive = false and save_trie_changes = false is not supported \ - because non-archival nodes must save trie changes in order to do do garbage collection." + because non-archival nodes must save trie changes in order to do garbage collection." 
        );

        Self {
diff --git a/core/o11y/Cargo.toml b/core/o11y/Cargo.toml
index 45f5cc0cc97..2bbcc417a59 100644
--- a/core/o11y/Cargo.toml
+++ b/core/o11y/Cargo.toml
@@ -21,6 +21,7 @@
 base64.workspace = true
 clap.workspace = true
 once_cell.workspace = true
 opentelemetry.workspace = true
+opentelemetry_sdk.workspace = true
 opentelemetry-otlp.workspace = true
 opentelemetry-semantic-conventions.workspace = true
 prometheus.workspace = true
diff --git a/core/o11y/src/log_config.rs b/core/o11y/src/log_config.rs
index 304724cf699..447ead4709a 100644
--- a/core/o11y/src/log_config.rs
+++ b/core/o11y/src/log_config.rs
@@ -1,4 +1,3 @@
-use crate::OpenTelemetryLevel;
 use serde::{Deserialize, Serialize};
 use std::path::Path;
 use std::{fs::File, io::Write};
@@ -12,7 +11,15 @@ pub struct LogConfig {
     /// Some("module") enables debug logging for "module".
     pub verbose_module: Option<String>,
     /// Verbosity level of collected traces.
-    pub opentelemetry_level: Option<OpenTelemetryLevel>,
+    ///
+    /// This is similar to `rust_log` but rather than
+    /// [`EnvFilter`](tracing_subscriber::filter::EnvFilter) it uses a simpler
+    /// [`Targets`](tracing_subscriber::filter::targets::Targets) filter.
+    ///
+    /// You can use the usual `debug` or `info` to set the same level for all spans, or customize
+    /// individual spans with something like `debug,store::trie=trace` to have specific targets be
+    /// more verbose than the default.
+    pub opentelemetry: Option<String>,
 }

 impl LogConfig {
diff --git a/core/o11y/src/metrics.rs b/core/o11y/src/metrics.rs
index 80a4395d142..d209bb67cd0 100644
--- a/core/o11y/src/metrics.rs
+++ b/core/o11y/src/metrics.rs
@@ -70,8 +70,8 @@ use once_cell::sync::Lazy;
 pub use prometheus::{
     self, core::MetricVec, core::MetricVecBuilder, exponential_buckets, linear_buckets, Counter,
-    Encoder, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, IntCounter, IntCounterVec,
-    IntGauge, IntGaugeVec, Opts, Result, TextEncoder,
+    CounterVec, Encoder, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, IntCounter,
+    IntCounterVec, IntGauge, IntGaugeVec, Opts, Result, TextEncoder,
 };
 use std::collections::HashSet;
@@ -114,6 +116,16 @@ pub fn try_create_counter(name: &str, help: &str) -> Result<Counter> {
     Ok(counter)
 }

+/// Attempts to create a `CounterVec`, returning `Err` if the registry does not accept the counter
+/// (potentially due to naming conflict).
+pub fn try_create_counter_vec(name: &str, help: &str, labels: &[&str]) -> Result<CounterVec> {
+    check_metric_near_prefix(name)?;
+    let opts = Opts::new(name, help);
+    let counter = CounterVec::new(opts, labels)?;
+    prometheus::register(Box::new(counter.clone()))?;
+    Ok(counter)
+}
+
 /// Attempts to create an `IntGauge`, returning `Err` if the registry does not accept the gauge
 /// (potentially due to naming conflict).
pub fn try_create_int_gauge(name: &str, help: &str) -> Result { diff --git a/core/o11y/src/opentelemetry.rs b/core/o11y/src/opentelemetry.rs index 6771818fee8..a29dbf9c503 100644 --- a/core/o11y/src/opentelemetry.rs +++ b/core/o11y/src/opentelemetry.rs @@ -1,17 +1,18 @@ use crate::reload::TracingLayer; use near_crypto::PublicKey; use near_primitives_core::types::AccountId; -use opentelemetry::sdk::trace::{self, IdGenerator, Sampler}; -use opentelemetry::sdk::Resource; use opentelemetry::KeyValue; +use opentelemetry_sdk::trace::{self, RandomIdGenerator, Sampler}; +use opentelemetry_sdk::Resource; use opentelemetry_semantic_conventions::resource::SERVICE_NAME; use tracing::level_filters::LevelFilter; +use tracing_subscriber::filter::targets::Targets; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::{reload, Layer}; // Doesn't define WARN and ERROR, because the highest verbosity of spans is INFO. -#[derive(Copy, Clone, Debug, Default, clap::ValueEnum, serde::Serialize, serde::Deserialize)] +#[derive(Copy, Clone, Debug, Default, clap::ValueEnum)] pub enum OpenTelemetryLevel { #[default] OFF, @@ -30,12 +31,12 @@ pub(crate) async fn add_opentelemetry_layer( node_public_key: PublicKey, account_id: Option, subscriber: S, -) -> (TracingLayer, reload::Handle) +) -> (TracingLayer, reload::Handle) where S: tracing::Subscriber + for<'span> LookupSpan<'span> + Send + Sync, { let filter = get_opentelemetry_filter(opentelemetry_level); - let (filter, handle) = reload::Layer::::new(filter); + let (filter, handle) = reload::Layer::::new(filter); let mut resource = vec![ KeyValue::new("chain_id", chain_id), @@ -51,26 +52,36 @@ where }; resource.push(KeyValue::new(SERVICE_NAME, service_name)); + let overriding_vars = ["OTEL_BSP_MAX_CONCURRENT_EXPORTS", "OTEL_BSP_MAX_QUEUE_SIZE"]; + let batch_config = if overriding_vars.iter().any(|v| std::env::var_os(v).is_some()) { + opentelemetry_sdk::trace::BatchConfigBuilder::default() + } else { + opentelemetry_sdk::trace::BatchConfigBuilder::default() + .with_max_concurrent_exports(2) + .with_max_queue_size(4096) + } + .build(); let tracer = opentelemetry_otlp::new_pipeline() .tracing() .with_exporter(opentelemetry_otlp::new_exporter().tonic()) .with_trace_config( trace::config() .with_sampler(Sampler::AlwaysOn) - .with_id_generator(IdGenerator::default()) + .with_id_generator(RandomIdGenerator::default()) .with_resource(Resource::new(resource)), ) - .install_batch(opentelemetry::runtime::Tokio) + .with_batch_config(batch_config) + .install_batch(opentelemetry_sdk::runtime::Tokio) .unwrap(); let layer = tracing_opentelemetry::layer().with_tracer(tracer).with_filter(filter); (subscriber.with(layer), handle) } -pub(crate) fn get_opentelemetry_filter(opentelemetry_level: OpenTelemetryLevel) -> LevelFilter { - match opentelemetry_level { +pub(crate) fn get_opentelemetry_filter(opentelemetry_level: OpenTelemetryLevel) -> Targets { + Targets::new().with_default(match opentelemetry_level { OpenTelemetryLevel::OFF => LevelFilter::OFF, OpenTelemetryLevel::INFO => LevelFilter::INFO, OpenTelemetryLevel::DEBUG => LevelFilter::DEBUG, OpenTelemetryLevel::TRACE => LevelFilter::TRACE, - } + }) } diff --git a/core/o11y/src/reload.rs b/core/o11y/src/reload.rs index 2e0338606dc..e75e49a36a9 100644 --- a/core/o11y/src/reload.rs +++ b/core/o11y/src/reload.rs @@ -1,11 +1,11 @@ use crate::opentelemetry::get_opentelemetry_filter; use crate::{log_config, log_counter, BuildEnvFilterError, EnvFilterBuilder, OpenTelemetryLevel}; 
 use once_cell::sync::OnceCell;
-use opentelemetry::sdk::trace::Tracer;
-use tracing::level_filters::LevelFilter;
+use opentelemetry_sdk::trace::Tracer;
+use std::str::FromStr as _;
 use tracing_appender::non_blocking::NonBlocking;
 use tracing_opentelemetry::OpenTelemetryLayer;
-use tracing_subscriber::filter::Filtered;
+use tracing_subscriber::filter::{Filtered, Targets};
 use tracing_subscriber::layer::Layered;
 use tracing_subscriber::reload::Handle;
 use tracing_subscriber::{fmt, reload, EnvFilter, Registry};
@@ -14,7 +14,7 @@
 static LOG_LAYER_RELOAD_HANDLE: OnceCell<
     Handle>,
 > = OnceCell::new();
 static OTLP_LAYER_RELOAD_HANDLE: OnceCell<
-    Handle>>,
+    Handle>>,
 > = OnceCell::new();

 // Records the level of opentelemetry tracing verbosity configured via command-line flags at the startup.
@@ -39,7 +39,7 @@ pub(crate) type SimpleLogLayer = Layered<
 >;

 pub(crate) type TracingLayer = Layered<
-    Filtered, reload::Layer, Inner>,
+    Filtered, reload::Layer, Inner>,
     Inner,
 >;
@@ -52,7 +52,7 @@
 }

 pub(crate) fn set_otlp_layer_handle(
-    handle: Handle>>,
+    handle: Handle>>,
 ) {
     OTLP_LAYER_RELOAD_HANDLE
         .set(handle)
@@ -77,6 +77,8 @@
     ReloadOpentelemetryLayer(#[source] reload::Error),
     #[error("could not create the log filter")]
     Parse(#[source] BuildEnvFilterError),
+    #[error("could not parse the opentelemetry filter")]
+    ParseOpentelemetry(#[source] tracing_subscriber::filter::ParseError),
 }

 pub fn reload_log_config(config: Option<&log_config::LogConfig>) {
@@ -84,7 +86,7 @@
         reload(
             config.rust_log.as_deref(),
             config.verbose_module.as_deref(),
-            config.opentelemetry_level,
+            config.opentelemetry.as_deref(),
         )
     } else {
         // When the LOG_CONFIG_FILENAME is not available, reset to the tracing and logging config
@@ -116,7 +118,7 @@
 pub fn reload(
     rust_log: Option<&str>,
     verbose_module: Option<&str>,
-    opentelemetry_level: Option<OpenTelemetryLevel>,
+    opentelemetry: Option<&str>,
 ) -> Result<(), Vec<ReloadError>> {
     let log_reload_result = LOG_LAYER_RELOAD_HANDLE.get().map_or(
         Err(ReloadError::NoLogReloadHandle),
@@ -137,14 +139,20 @@
         },
     );

-    let opentelemetry_level = opentelemetry_level
-        .unwrap_or(*DEFAULT_OTLP_LEVEL.get().unwrap_or(&OpenTelemetryLevel::OFF));
+    let opentelemetry_filter = opentelemetry
+        .map(|f| Targets::from_str(f).map_err(ReloadError::ParseOpentelemetry))
+        .unwrap_or_else(|| {
+            Ok(get_opentelemetry_filter(
+                *DEFAULT_OTLP_LEVEL.get().unwrap_or(&OpenTelemetryLevel::OFF),
+            ))
+        });
     let opentelemetry_reload_result = OTLP_LAYER_RELOAD_HANDLE.get().map_or(
         Err(ReloadError::NoOpentelemetryReloadHandle),
         |reload_handle| {
+            let opentelemetry_filter = opentelemetry_filter?;
             reload_handle
                 .modify(|otlp_filter| {
-                    *otlp_filter = get_opentelemetry_filter(opentelemetry_level);
+                    *otlp_filter = opentelemetry_filter;
                 })
                 .map_err(ReloadError::ReloadOpentelemetryLayer)?;
             Ok(())
diff --git a/core/parameters/res/runtime_configs/66.yaml b/core/parameters/res/runtime_configs/66.yaml
new file mode 100644
index 00000000000..f6ed018a86a
--- /dev/null
+++ b/core/parameters/res/runtime_configs/66.yaml
@@ -0,0 +1,18 @@
+# These changes aim to increase the capacity of the network when executing function calls for small
+# smart contracts while still preserving the same gas costs for the largest smart contracts (4MB).
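+#
+# Back-of-the-envelope check (not normative): the per-call base cost
+# (send_sir + execution) drops by roughly
+#   (2_319_861_500_000 - 200_000_000_000) + (2_319_861_500_000 - 780_000_000_000)
+#   ~= 3.66 Tgas,
+# while loading a 4 MiB (4_194_304 byte) contract becomes
+#   (1_089_295 - 216_750) * 4_194_304 ~= 3.66 Tgas
+# more expensive, so the two changes approximately cancel out for the largest
+# contracts.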
+action_function_call: { + old: { + send_sir: 2_319_861_500_000, + send_not_sir: 2_319_861_500_000, + execution: 2_319_861_500_000, + }, + new: { + send_sir: 200_000_000_000, + send_not_sir: 200_000_000_000, + execution: 780_000_000_000, + } +} +wasm_contract_loading_bytes: { + old: 216_750, + new: 1_089_295, +} diff --git a/core/parameters/res/runtime_configs/parameters.snap b/core/parameters/res/runtime_configs/parameters.snap index 88a408b4c85..b9c6932e1fe 100644 --- a/core/parameters/res/runtime_configs/parameters.snap +++ b/core/parameters/res/runtime_configs/parameters.snap @@ -39,9 +39,9 @@ action_deploy_contract_per_byte - send_not_sir: 6_812_999 - execution: 64_572_944 action_function_call -- send_sir: 2_319_861_500_000 -- send_not_sir: 2_319_861_500_000 -- execution: 2_319_861_500_000 +- send_sir: 200_000_000_000 +- send_not_sir: 200_000_000_000 +- execution: 780_000_000_000 action_function_call_per_byte - send_sir: 2_235_934 - send_not_sir: 2_235_934 @@ -78,7 +78,7 @@ wasm_regular_op_cost 822_756 wasm_grow_mem_cost 1 wasm_base 264_768_111 wasm_contract_loading_base 35_445_963 -wasm_contract_loading_bytes 216_750 +wasm_contract_loading_bytes 1_089_295 wasm_read_memory_base 2_609_863_200 wasm_read_memory_byte 3_801_333 wasm_write_memory_base 2_803_794_861 diff --git a/core/parameters/src/config.rs b/core/parameters/src/config.rs index a503fbed6e2..427e1c19924 100644 --- a/core/parameters/src/config.rs +++ b/core/parameters/src/config.rs @@ -41,8 +41,11 @@ impl RuntimeConfig { pub fn test() -> Self { let config_store = super::config_store::RuntimeConfigStore::new(None); - let wasm_config = + let mut wasm_config = crate::vm::Config::clone(&config_store.get_config(PROTOCOL_VERSION).wasm_config); + // Lower the yield timeout length so that we can observe timeouts in integration tests. + wasm_config.limit_config.yield_timeout_length_in_blocks = 10; + RuntimeConfig { fees: RuntimeFeesConfig::test(), wasm_config, diff --git a/core/parameters/src/config_store.rs b/core/parameters/src/config_store.rs index e0cf6d18a2f..eddc92fc35e 100644 --- a/core/parameters/src/config_store.rs +++ b/core/parameters/src/config_store.rs @@ -37,6 +37,7 @@ static CONFIG_DIFFS: &[(ProtocolVersion, &str)] = &[ (62, include_config!("62.yaml")), (63, include_config!("63.yaml")), (64, include_config!("64.yaml")), + (66, include_config!("66.yaml")), (83, include_config!("83.yaml")), (129, include_config!("129.yaml")), // Introduce ETH-implicit accounts. @@ -157,7 +158,8 @@ mod tests { use super::*; use crate::cost::{ActionCosts, ExtCosts}; use near_primitives_core::version::ProtocolFeature::{ - LowerDataReceiptAndEcrecoverBaseCost, LowerStorageCost, LowerStorageKeyLimit, + DecreaseFunctionCallBaseCost, LowerDataReceiptAndEcrecoverBaseCost, LowerStorageCost, + LowerStorageKeyLimit, }; use std::collections::HashSet; @@ -196,6 +198,11 @@ mod tests { fn test_max_prepaid_gas() { let store = RuntimeConfigStore::new(None); for (protocol_version, config) in store.store.iter() { + if *protocol_version >= DecreaseFunctionCallBaseCost.protocol_version() { + continue; + } + + // TODO(#10955): Enforce the depth limit directly, regardless of the gas costs. 
assert!( config.wasm_config.limit_config.max_total_prepaid_gas / config.fees.min_receipt_with_function_call_gas() diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap index 0dcc901b2d9..27db573c370 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap index 275c62eae4a..05bd3cb37ba 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__139.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__139.json.snap index cbe117d48dc..dbab708c54e 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__139.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__139.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__66.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__66.json.snap new file mode 100644 index 00000000000..f1ee5bf82a5 --- /dev/null +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__66.json.snap @@ -0,0 +1,226 @@ +--- +source: core/parameters/src/config_store.rs +expression: config_view +--- +{ + 
"storage_amount_per_byte": "10000000000000000000", + "transaction_costs": { + "action_receipt_creation_config": { + "send_sir": 108059500000, + "send_not_sir": 108059500000, + "execution": 108059500000 + }, + "data_receipt_creation_config": { + "base_cost": { + "send_sir": 36486732312, + "send_not_sir": 36486732312, + "execution": 36486732312 + }, + "cost_per_byte": { + "send_sir": 17212011, + "send_not_sir": 17212011, + "execution": 17212011 + } + }, + "action_creation_config": { + "create_account_cost": { + "send_sir": 3850000000000, + "send_not_sir": 3850000000000, + "execution": 3850000000000 + }, + "deploy_contract_cost": { + "send_sir": 184765750000, + "send_not_sir": 184765750000, + "execution": 184765750000 + }, + "deploy_contract_cost_per_byte": { + "send_sir": 6812999, + "send_not_sir": 6812999, + "execution": 64572944 + }, + "function_call_cost": { + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 + }, + "function_call_cost_per_byte": { + "send_sir": 2235934, + "send_not_sir": 2235934, + "execution": 2235934 + }, + "transfer_cost": { + "send_sir": 115123062500, + "send_not_sir": 115123062500, + "execution": 115123062500 + }, + "stake_cost": { + "send_sir": 141715687500, + "send_not_sir": 141715687500, + "execution": 102217625000 + }, + "add_key_cost": { + "full_access_cost": { + "send_sir": 101765125000, + "send_not_sir": 101765125000, + "execution": 101765125000 + }, + "function_call_cost": { + "send_sir": 102217625000, + "send_not_sir": 102217625000, + "execution": 102217625000 + }, + "function_call_cost_per_byte": { + "send_sir": 1925331, + "send_not_sir": 1925331, + "execution": 1925331 + } + }, + "delete_key_cost": { + "send_sir": 94946625000, + "send_not_sir": 94946625000, + "execution": 94946625000 + }, + "delete_account_cost": { + "send_sir": 147489000000, + "send_not_sir": 147489000000, + "execution": 147489000000 + }, + "delegate_cost": { + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 200000000000 + } + }, + "storage_usage_config": { + "num_bytes_account": 100, + "num_extra_bytes_record": 40 + }, + "burnt_gas_reward": [ + 3, + 10 + ], + "pessimistic_gas_price_inflation_ratio": [ + 103, + 100 + ], + "storage_proof_size_soft_limit": 999999999999999 + }, + "wasm_config": { + "ext_costs": { + "base": 264768111, + "contract_loading_base": 35445963, + "contract_loading_bytes": 1089295, + "read_memory_base": 2609863200, + "read_memory_byte": 3801333, + "write_memory_base": 2803794861, + "write_memory_byte": 2723772, + "read_register_base": 2517165186, + "read_register_byte": 98562, + "write_register_base": 2865522486, + "write_register_byte": 3801564, + "utf8_decoding_base": 3111779061, + "utf8_decoding_byte": 291580479, + "utf16_decoding_base": 3543313050, + "utf16_decoding_byte": 163577493, + "sha256_base": 4540970250, + "sha256_byte": 24117351, + "keccak256_base": 5879491275, + "keccak256_byte": 21471105, + "keccak512_base": 5811388236, + "keccak512_byte": 36649701, + "ripemd160_base": 853675086, + "ripemd160_block": 680107584, + "ed25519_verify_base": 210000000000, + "ed25519_verify_byte": 9000000, + "ecrecover_base": 278821988457, + "log_base": 3543313050, + "log_byte": 13198791, + "storage_write_base": 64196736000, + "storage_write_key_byte": 70482867, + "storage_write_value_byte": 31018539, + "storage_write_evicted_byte": 32117307, + "storage_read_base": 56356845750, + "storage_read_key_byte": 30952533, + "storage_read_value_byte": 5611005, + "storage_remove_base": 53473030500, + 
"storage_remove_key_byte": 38220384, + "storage_remove_ret_value_byte": 11531556, + "storage_has_key_base": 54039896625, + "storage_has_key_byte": 30790845, + "storage_iter_create_prefix_base": 0, + "storage_iter_create_prefix_byte": 0, + "storage_iter_create_range_base": 0, + "storage_iter_create_from_byte": 0, + "storage_iter_create_to_byte": 0, + "storage_iter_next_base": 0, + "storage_iter_next_key_byte": 0, + "storage_iter_next_value_byte": 0, + "touching_trie_node": 16101955926, + "read_cached_trie_node": 2280000000, + "promise_and_base": 1465013400, + "promise_and_per_promise": 5452176, + "promise_return": 560152386, + "validator_stake_base": 911834726400, + "validator_total_stake_base": 911834726400, + "contract_compile_base": 0, + "contract_compile_bytes": 0, + "alt_bn128_g1_multiexp_base": 713000000000, + "alt_bn128_g1_multiexp_element": 320000000000, + "alt_bn128_g1_sum_base": 3000000000, + "alt_bn128_g1_sum_element": 5000000000, + "alt_bn128_pairing_check_base": 9686000000000, + "alt_bn128_pairing_check_element": 5102000000000, + "yield_create_base": 300000000000000, + "yield_create_byte": 300000000000000, + "yield_resume_base": 300000000000000, + "yield_resume_byte": 300000000000000 + }, + "grow_mem_cost": 1, + "regular_op_cost": 822756, + "vm_kind": "", + "disable_9393_fix": false, + "storage_get_mode": "FlatStorage", + "fix_contract_loading_cost": false, + "implicit_account_creation": true, + "math_extension": true, + "ed25519_verify": true, + "alt_bn128": true, + "function_call_weight": true, + "eth_implicit_accounts": false, + "yield_resume_host_functions": false, + "limit_config": { + "max_gas_burnt": 300000000000000, + "max_stack_height": 262144, + "contract_prepare_version": 2, + "initial_memory_pages": 1024, + "max_memory_pages": 2048, + "registers_memory_limit": 1073741824, + "max_register_size": 104857600, + "max_number_registers": 100, + "max_number_logs": 100, + "max_total_log_length": 16384, + "max_total_prepaid_gas": 300000000000000, + "max_actions_per_receipt": 100, + "max_number_bytes_method_names": 2000, + "max_length_method_name": 256, + "max_arguments_length": 4194304, + "max_length_returned_data": 4194304, + "max_contract_size": 4194304, + "max_transaction_size": 4194304, + "max_length_storage_key": 2048, + "max_length_storage_value": 4194304, + "max_promises_per_function_call_action": 1024, + "max_number_input_data_dependencies": 128, + "max_functions_number_per_contract": 10000, + "wasmer2_stack_limit": 204800, + "max_locals_per_contract": 1000000, + "account_id_validity_rules_version": 1, + "yield_timeout_length_in_blocks": 200, + "max_yield_payload_size": 1024 + } + }, + "account_creation_config": { + "min_allowed_top_level_account_length": 65, + "registrar_account_id": "registrar" + } +} diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__83.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__83.json.snap index cd7502ecacb..b79fb0d9e59 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__83.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__83.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { 
"base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap index 0dcc901b2d9..27db573c370 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap index 275c62eae4a..05bd3cb37ba 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_139.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_139.json.snap index cbe117d48dc..dbab708c54e 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_139.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_139.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_66.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_66.json.snap new file mode 100644 index 
00000000000..f1ee5bf82a5 --- /dev/null +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_66.json.snap @@ -0,0 +1,226 @@ +--- +source: core/parameters/src/config_store.rs +expression: config_view +--- +{ + "storage_amount_per_byte": "10000000000000000000", + "transaction_costs": { + "action_receipt_creation_config": { + "send_sir": 108059500000, + "send_not_sir": 108059500000, + "execution": 108059500000 + }, + "data_receipt_creation_config": { + "base_cost": { + "send_sir": 36486732312, + "send_not_sir": 36486732312, + "execution": 36486732312 + }, + "cost_per_byte": { + "send_sir": 17212011, + "send_not_sir": 17212011, + "execution": 17212011 + } + }, + "action_creation_config": { + "create_account_cost": { + "send_sir": 3850000000000, + "send_not_sir": 3850000000000, + "execution": 3850000000000 + }, + "deploy_contract_cost": { + "send_sir": 184765750000, + "send_not_sir": 184765750000, + "execution": 184765750000 + }, + "deploy_contract_cost_per_byte": { + "send_sir": 6812999, + "send_not_sir": 6812999, + "execution": 64572944 + }, + "function_call_cost": { + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 + }, + "function_call_cost_per_byte": { + "send_sir": 2235934, + "send_not_sir": 2235934, + "execution": 2235934 + }, + "transfer_cost": { + "send_sir": 115123062500, + "send_not_sir": 115123062500, + "execution": 115123062500 + }, + "stake_cost": { + "send_sir": 141715687500, + "send_not_sir": 141715687500, + "execution": 102217625000 + }, + "add_key_cost": { + "full_access_cost": { + "send_sir": 101765125000, + "send_not_sir": 101765125000, + "execution": 101765125000 + }, + "function_call_cost": { + "send_sir": 102217625000, + "send_not_sir": 102217625000, + "execution": 102217625000 + }, + "function_call_cost_per_byte": { + "send_sir": 1925331, + "send_not_sir": 1925331, + "execution": 1925331 + } + }, + "delete_key_cost": { + "send_sir": 94946625000, + "send_not_sir": 94946625000, + "execution": 94946625000 + }, + "delete_account_cost": { + "send_sir": 147489000000, + "send_not_sir": 147489000000, + "execution": 147489000000 + }, + "delegate_cost": { + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 200000000000 + } + }, + "storage_usage_config": { + "num_bytes_account": 100, + "num_extra_bytes_record": 40 + }, + "burnt_gas_reward": [ + 3, + 10 + ], + "pessimistic_gas_price_inflation_ratio": [ + 103, + 100 + ], + "storage_proof_size_soft_limit": 999999999999999 + }, + "wasm_config": { + "ext_costs": { + "base": 264768111, + "contract_loading_base": 35445963, + "contract_loading_bytes": 1089295, + "read_memory_base": 2609863200, + "read_memory_byte": 3801333, + "write_memory_base": 2803794861, + "write_memory_byte": 2723772, + "read_register_base": 2517165186, + "read_register_byte": 98562, + "write_register_base": 2865522486, + "write_register_byte": 3801564, + "utf8_decoding_base": 3111779061, + "utf8_decoding_byte": 291580479, + "utf16_decoding_base": 3543313050, + "utf16_decoding_byte": 163577493, + "sha256_base": 4540970250, + "sha256_byte": 24117351, + "keccak256_base": 5879491275, + "keccak256_byte": 21471105, + "keccak512_base": 5811388236, + "keccak512_byte": 36649701, + "ripemd160_base": 853675086, + "ripemd160_block": 680107584, + "ed25519_verify_base": 210000000000, + "ed25519_verify_byte": 9000000, + "ecrecover_base": 278821988457, + "log_base": 3543313050, + "log_byte": 13198791, + "storage_write_base": 64196736000, + "storage_write_key_byte": 70482867, + 
"storage_write_value_byte": 31018539, + "storage_write_evicted_byte": 32117307, + "storage_read_base": 56356845750, + "storage_read_key_byte": 30952533, + "storage_read_value_byte": 5611005, + "storage_remove_base": 53473030500, + "storage_remove_key_byte": 38220384, + "storage_remove_ret_value_byte": 11531556, + "storage_has_key_base": 54039896625, + "storage_has_key_byte": 30790845, + "storage_iter_create_prefix_base": 0, + "storage_iter_create_prefix_byte": 0, + "storage_iter_create_range_base": 0, + "storage_iter_create_from_byte": 0, + "storage_iter_create_to_byte": 0, + "storage_iter_next_base": 0, + "storage_iter_next_key_byte": 0, + "storage_iter_next_value_byte": 0, + "touching_trie_node": 16101955926, + "read_cached_trie_node": 2280000000, + "promise_and_base": 1465013400, + "promise_and_per_promise": 5452176, + "promise_return": 560152386, + "validator_stake_base": 911834726400, + "validator_total_stake_base": 911834726400, + "contract_compile_base": 0, + "contract_compile_bytes": 0, + "alt_bn128_g1_multiexp_base": 713000000000, + "alt_bn128_g1_multiexp_element": 320000000000, + "alt_bn128_g1_sum_base": 3000000000, + "alt_bn128_g1_sum_element": 5000000000, + "alt_bn128_pairing_check_base": 9686000000000, + "alt_bn128_pairing_check_element": 5102000000000, + "yield_create_base": 300000000000000, + "yield_create_byte": 300000000000000, + "yield_resume_base": 300000000000000, + "yield_resume_byte": 300000000000000 + }, + "grow_mem_cost": 1, + "regular_op_cost": 822756, + "vm_kind": "", + "disable_9393_fix": false, + "storage_get_mode": "FlatStorage", + "fix_contract_loading_cost": false, + "implicit_account_creation": true, + "math_extension": true, + "ed25519_verify": true, + "alt_bn128": true, + "function_call_weight": true, + "eth_implicit_accounts": false, + "yield_resume_host_functions": false, + "limit_config": { + "max_gas_burnt": 300000000000000, + "max_stack_height": 262144, + "contract_prepare_version": 2, + "initial_memory_pages": 1024, + "max_memory_pages": 2048, + "registers_memory_limit": 1073741824, + "max_register_size": 104857600, + "max_number_registers": 100, + "max_number_logs": 100, + "max_total_log_length": 16384, + "max_total_prepaid_gas": 300000000000000, + "max_actions_per_receipt": 100, + "max_number_bytes_method_names": 2000, + "max_length_method_name": 256, + "max_arguments_length": 4194304, + "max_length_returned_data": 4194304, + "max_contract_size": 4194304, + "max_transaction_size": 4194304, + "max_length_storage_key": 2048, + "max_length_storage_value": 4194304, + "max_promises_per_function_call_action": 1024, + "max_number_input_data_dependencies": 128, + "max_functions_number_per_contract": 10000, + "wasmer2_stack_limit": 204800, + "max_locals_per_contract": 1000000, + "account_id_validity_rules_version": 1, + "yield_timeout_length_in_blocks": 200, + "max_yield_payload_size": 1024 + } + }, + "account_creation_config": { + "min_allowed_top_level_account_length": 65, + "registrar_account_id": "registrar" + } +} diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap index cd7502ecacb..b79fb0d9e59 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap @@ -39,9 +39,9 @@ expression: config_view "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - 
"send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: config_view "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap b/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap index 3f253cf734b..bd066f5c12b 100644 --- a/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap +++ b/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap @@ -39,9 +39,9 @@ expression: "&view" "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: "&view" "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/primitives-core/src/version.rs b/core/primitives-core/src/version.rs index ce12dcbe518..8753534f127 100644 --- a/core/primitives-core/src/version.rs +++ b/core/primitives-core/src/version.rs @@ -98,6 +98,8 @@ pub enum ProtocolFeature { /// /// Compute Costs NEP-455: https://github.com/near/NEPs/blob/master/neps/nep-0455.md ComputeCosts, + /// Decrease the cost of function call action. Only affects the execution cost. + DecreaseFunctionCallBaseCost, /// Enable flat storage for reads, reducing number of DB accesses from `2 * key.len()` in /// the worst case to 2. /// @@ -135,6 +137,11 @@ pub enum ProtocolFeature { /// Enables stateless validation which is introduced in https://github.com/near/NEPs/pull/509 StatelessValidationV0, EthImplicitAccounts, + /// Enables yield execution which is introduced in https://github.com/near/NEPs/pull/519 + YieldExecution, + + /// Protocol version reserved for use in resharding tests. + SimpleNightshadeTestonly, // Stateless validation: lower block and chunk validator kickout percent from 90 to 50. LowerValidatorKickoutPercentForDebugging, @@ -195,6 +202,15 @@ impl ProtocolFeature { // TODO(resharding) clean up after stake wars is over. #[cfg(not(feature = "statelessnet_protocol"))] ProtocolFeature::SimpleNightshadeV3 => 65, + ProtocolFeature::DecreaseFunctionCallBaseCost => 66, + + // Nightly features which should be tested for compatibility with resharding + ProtocolFeature::YieldExecution => 78, + + // This protocol version is reserved for use in resharding tests. An extra resharding + // is simulated on top of the latest shard layout in production. Note that later + // protocol versions will still have the production layout. + ProtocolFeature::SimpleNightshadeTestonly => 79, // StatelessNet features ProtocolFeature::StatelessValidationV0 => 80, @@ -222,7 +238,7 @@ impl ProtocolFeature { /// Current protocol version used on the mainnet. /// Some features (e. g. 
FixStorageUsage) require that there is at least one epoch with exactly /// the corresponding version -const STABLE_PROTOCOL_VERSION: ProtocolVersion = 65; +const STABLE_PROTOCOL_VERSION: ProtocolVersion = 66; /// Largest protocol version supported by the current binary. pub const PROTOCOL_VERSION: ProtocolVersion = if cfg!(feature = "statelessnet_protocol") { diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index 6e4bd93588e..10edbbe4a37 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -16,6 +16,7 @@ arbitrary.workspace = true base64.workspace = true borsh.workspace = true bytesize.workspace = true +bytes.workspace = true cfg-if.workspace = true chrono.workspace = true derive_more.workspace = true @@ -39,6 +40,7 @@ stdx.workspace = true strum.workspace = true thiserror.workspace = true tracing.workspace = true +zstd.workspace = true near-async.workspace = true near-crypto.workspace = true diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs index e6070030bb8..c8a97555f04 100644 --- a/core/primitives/src/block.rs +++ b/core/primitives/src/block.rs @@ -9,9 +9,9 @@ use crate::checked_feature; use crate::hash::{hash, CryptoHash}; use crate::merkle::{merklize, verify_path, MerklePath}; use crate::num_rational::Rational32; +use crate::reed_solomon::ReedSolomonWrapper; use crate::sharding::{ - ChunkHashHeight, EncodedShardChunk, ReedSolomonWrapper, ShardChunk, ShardChunkHeader, - ShardChunkHeaderV1, + ChunkHashHeight, EncodedShardChunk, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, }; use crate::types::{Balance, BlockHeight, EpochId, Gas, NumBlocks, StateRoot}; use crate::validator_signer::{EmptyValidatorSigner, ValidatorSigner}; diff --git a/core/primitives/src/epoch_manager.rs b/core/primitives/src/epoch_manager.rs index 22d54ddfaa1..8ecc3a80d31 100644 --- a/core/primitives/src/epoch_manager.rs +++ b/core/primitives/src/epoch_manager.rs @@ -14,6 +14,9 @@ use near_primitives_core::types::BlockHeight; use smart_default::SmartDefault; use std::collections::{BTreeMap, HashMap}; +#[cfg(feature = "nightly")] +use crate::version::ProtocolFeature; + pub type RngSeed = [u8; 32]; pub const AGGREGATOR_KEY: &[u8] = b"AGGREGATOR"; @@ -146,6 +149,10 @@ impl AllEpochConfig { config } + pub fn chain_id(&self) -> &str { + &self.chain_id + } + fn config_stateless_net( config: &mut EpochConfig, chain_id: &str, @@ -177,6 +184,17 @@ impl AllEpochConfig { } fn config_nightshade(config: &mut EpochConfig, protocol_version: ProtocolVersion) { + // Unlike the other checks, this one is for strict equality. The testonly nightshade layout + // is specifically used in resharding tests, not for any other protocol versions. 
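+        // Concretely: with nightly enabled, an epoch at exactly protocol version 79
+        // (SimpleNightshadeTestonly) uses the testonly layout (one extra split on
+        // top of V3), while later protocol versions fall through to the production
+        // SimpleNightshadeV3 layout below.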
+        #[cfg(feature = "nightly")]
+        if protocol_version == ProtocolFeature::SimpleNightshadeTestonly.protocol_version() {
+            Self::config_nightshade_impl(
+                config,
+                ShardLayout::get_simple_nightshade_layout_testonly(),
+            );
+            return;
+        }
+
         if checked_feature!("stable", SimpleNightshadeV3, protocol_version) {
             Self::config_nightshade_impl(config, ShardLayout::get_simple_nightshade_layout_v3());
             return;
diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs
index ed70396a719..a30512457d7 100644
--- a/core/primitives/src/lib.rs
+++ b/core/primitives/src/lib.rs
@@ -18,6 +18,7 @@ pub mod network;
 pub mod profile_data_v2;
 pub mod rand;
 pub mod receipt;
+pub mod reed_solomon;
 pub mod runtime;
 pub mod sandbox;
 pub mod shard_layout;
diff --git a/core/primitives/src/reed_solomon.rs b/core/primitives/src/reed_solomon.rs
new file mode 100644
index 00000000000..ec3af9448af
--- /dev/null
+++ b/core/primitives/src/reed_solomon.rs
@@ -0,0 +1,85 @@
+use borsh::{BorshDeserialize, BorshSerialize};
+use itertools::Itertools;
+use reed_solomon_erasure::galois_8::{Field, ReedSolomon};
+use reed_solomon_erasure::ReconstructShard;
+use std::io::Error;
+
+/// The TTL for a reed solomon instance, used to control memory usage. The number below
+/// corresponds to roughly 60MB of memory usage.
+const RS_TTL: u64 = 2 * 1024;
+
+/// Wrapper around reed solomon which occasionally resets the underlying
+/// reed solomon instance to work around the memory leak in the reed solomon
+/// implementation.
+pub struct ReedSolomonWrapper {
+    rs: ReedSolomon,
+    ttl: u64,
+}
+
+impl ReedSolomonWrapper {
+    pub fn new(data_shards: usize, parity_shards: usize) -> Self {
+        ReedSolomonWrapper {
+            rs: ReedSolomon::new(data_shards, parity_shards).unwrap(),
+            ttl: RS_TTL,
+        }
+    }
+
+    // Encode takes a serializable object and returns a tuple of the parts and the length
+    // of the encoded data.
+    pub fn encode<T: BorshSerialize>(&mut self, data: T) -> (Vec<Option<Box<[u8]>>>, usize) {
+        let mut bytes = borsh::to_vec(&data).unwrap();
+        let encoded_length = bytes.len();
+
+        let data_parts = self.rs.data_shard_count();
+        let part_length = (encoded_length + data_parts - 1) / data_parts;
+
+        // Pad the bytes to be a multiple of `part_length`, convert the encoded data into
+        // `data_shard_count` parts, and pad with `parity_shard_count` None values.
+        // E.g. with 4 data_parts and 2 parity_parts:
+        // b'aaabbbcccd' -> [Some(b'aaa'), Some(b'bbb'), Some(b'ccc'), Some(b'd00'), None, None]
+        bytes.resize(data_parts * part_length, 0);
+        let mut parts = bytes
+            .chunks_exact(part_length)
+            .map(|chunk| Some(chunk.to_vec().into_boxed_slice()))
+            .chain(itertools::repeat_n(None, self.rs.parity_shard_count()))
+            .collect_vec();
+
+        // Fine to unwrap here as we just constructed the parts
+        self.reconstruct(&mut parts).unwrap();
+
+        (parts, encoded_length)
+    }
+
+    // Decode is the reverse of encode: it takes the parts and the length of the encoded
+    // data and returns the deserialized object.
+    // Returns an error if reed solomon decoding fails or borsh deserialization fails.
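+    //
+    // Example round trip (sketch; `Payload` stands for any borsh-serializable type):
+    // with 4 data and 2 parity shards,
+    //     let (mut parts, len) = rs.encode(payload);
+    //     parts[0] = None;
+    //     parts[5] = None; // lose any two parts
+    //     let decoded: Payload = rs.decode(&mut parts, len)?;
+    // succeeds, since any 4 of the 6 parts are enough to reconstruct the data.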
+    pub fn decode<T: BorshDeserialize>(
+        &mut self,
+        parts: &mut [Option<Box<[u8]>>],
+        encoded_length: usize,
+    ) -> Result<T, Error> {
+        if let Err(err) = self.reconstruct(parts) {
+            return Err(Error::other(err));
+        }
+
+        let encoded_data = parts
+            .iter()
+            .flat_map(|option| option.as_ref().expect("Missing shard").iter())
+            .cloned()
+            .take(encoded_length)
+            .collect_vec();
+
+        T::try_from_slice(&encoded_data)
+    }
+
+    fn reconstruct<T: ReconstructShard<Field>>(
+        &mut self,
+        slices: &mut [T],
+    ) -> Result<(), reed_solomon_erasure::Error> {
+        self.ttl -= 1;
+        if self.ttl == 0 {
+            *self =
+                ReedSolomonWrapper::new(self.rs.data_shard_count(), self.rs.parity_shard_count());
+        }
+        self.rs.reconstruct(slices)
+    }
+}
diff --git a/core/primitives/src/shard_layout.rs b/core/primitives/src/shard_layout.rs
index 9fc2f22965d..a08ffa78b1f 100644
--- a/core/primitives/src/shard_layout.rs
+++ b/core/primitives/src/shard_layout.rs
@@ -185,6 +185,27 @@ impl ShardLayout {
         )
     }

+    /// This layout is used only in resharding tests. It allows testing of any features which were
+    /// introduced after the last layout upgrade in production. Currently it is built on top of V3.
+    #[cfg(feature = "nightly")]
+    pub fn get_simple_nightshade_layout_testonly() -> ShardLayout {
+        ShardLayout::v1(
+            vec![
+                "aurora",
+                "aurora-0",
+                "game.hot.tg",
+                "kkuuue2akv_1630967379.near",
+                "nightly",
+                "tge-lockup.sweat",
+            ]
+            .into_iter()
+            .map(|s| s.parse().unwrap())
+            .collect(),
+            Some(vec![vec![0], vec![1], vec![2], vec![3], vec![4, 5], vec![6]]),
+            4,
+        )
+    }
+
     /// Given a parent shard id, return the shard uids for the shards in the current shard layout that
     /// are split from this parent shard. If this shard layout has no parent shard layout, return None
     pub fn get_children_shards_uids(&self, parent_shard_id: ShardId) -> Option<Vec<ShardUId>> {
@@ -429,7 +450,7 @@ impl<'de> serde::de::Visitor<'de> for ShardUIdVisitor {
     fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
         write!(
             formatter,
-            "either string format of `ShardUId` like s0v1 for shard 0 version 1, or a map"
+            "either string format of `ShardUId` like 's0.v3' for shard 0 version 3, or a map"
         )
     }
diff --git a/core/primitives/src/sharding.rs b/core/primitives/src/sharding.rs
index 2341ea24fe2..e421c79758b 100644
--- a/core/primitives/src/sharding.rs
+++ b/core/primitives/src/sharding.rs
@@ -1,6 +1,7 @@
 use crate::hash::{hash, CryptoHash};
 use crate::merkle::{combine_hash, merklize, verify_path, MerklePath};
 use crate::receipt::Receipt;
+use crate::reed_solomon::ReedSolomonWrapper;
 use crate::transaction::SignedTransaction;
 use crate::types::validator_stake::{ValidatorStake, ValidatorStakeIter, ValidatorStakeV1};
 use crate::types::{Balance, BlockHeight, Gas, MerkleHash, ShardId, StateRoot};
@@ -9,8 +10,6 @@ use crate::version::{ProtocolFeature, ProtocolVersion, SHARD_CHUNK_HEADER_UPGRAD
 use borsh::{BorshDeserialize, BorshSerialize};
 use near_crypto::Signature;
 use near_fmt::AbbrBytes;
-use reed_solomon_erasure::galois_8::{Field, ReedSolomon};
-use reed_solomon_erasure::ReconstructShard;
 use std::cmp::Ordering;
 use std::sync::Arc;
 use tracing::debug_span;
@@ -858,14 +857,6 @@ impl EncodedShardChunkBody {
         fetched_parts
     }

-    /// Returns true if reconstruction was successful
-    pub fn reconstruct(
-        &mut self,
-        rs: &mut ReedSolomonWrapper,
-    ) -> Result<(), reed_solomon_erasure::Error> {
-        rs.reconstruct(self.parts.as_mut_slice())
-    }
-
     pub fn get_merkle_hash_and_paths(&self) -> (MerkleHash, Vec<MerklePath>) {
         let parts: Vec<&[u8]> =
             self.parts.iter().map(|x| x.as_deref().unwrap()).collect::<Vec<_>>();
@@ -877,7 +868,7 @@ impl EncodedShardChunkBody
{ pub struct ReceiptList<'a>(pub ShardId, pub &'a [Receipt]); #[derive(BorshSerialize, BorshDeserialize)] -struct TransactionReceipt(Vec, Vec); +pub struct TransactionReceipt(pub Vec, pub Vec); #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, PartialEq, Eq)] pub struct EncodedShardChunkV1 { @@ -885,26 +876,6 @@ pub struct EncodedShardChunkV1 { pub content: EncodedShardChunkBody, } -impl EncodedShardChunkV1 { - pub fn chunk_hash(&self) -> ChunkHash { - self.header.chunk_hash() - } - - pub fn decode_chunk(&self, data_parts: usize) -> Result { - let transaction_receipts = EncodedShardChunk::decode_transaction_receipts( - &self.content.parts[0..data_parts], - self.header.inner.encoded_length, - )?; - - Ok(ShardChunkV1 { - chunk_hash: self.header.chunk_hash(), - header: self.header.clone(), - transactions: transaction_receipts.0, - prev_outgoing_receipts: transaction_receipts.1, - }) - } -} - #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, PartialEq, Eq)] pub struct EncodedShardChunkV2 { pub header: ShardChunkHeader, @@ -1003,37 +974,6 @@ impl EncodedShardChunk { TransactionReceipt::try_from_slice(&encoded_data) } - pub fn encode_transaction_receipts( - rs: &ReedSolomonWrapper, - transactions: Vec, - outgoing_receipts: &[Receipt], - ) -> Result<(Vec>>, u64), std::io::Error> { - let mut bytes = - borsh::to_vec(&TransactionReceipt(transactions, outgoing_receipts.to_vec()))?; - - let mut parts = Vec::with_capacity(rs.total_shard_count()); - let data_parts = rs.data_shard_count(); - let total_parts = rs.total_shard_count(); - let encoded_length = bytes.len(); - - if bytes.len() % data_parts != 0 { - bytes.extend((bytes.len() % data_parts..data_parts).map(|_| 0)); - } - let shard_length = (encoded_length + data_parts - 1) / data_parts; - assert_eq!(bytes.len(), shard_length * data_parts); - - for i in 0..data_parts { - parts.push(Some( - bytes[i * shard_length..(i + 1) * shard_length].to_vec().into_boxed_slice() - as Box<[u8]>, - )); - } - for _ in data_parts..total_parts { - parts.push(None); - } - Ok((parts, encoded_length as u64)) - } - pub fn new( prev_block_hash: CryptoHash, prev_state_root: StateRoot, @@ -1053,10 +993,8 @@ impl EncodedShardChunk { protocol_version: ProtocolVersion, ) -> Result<(Self, Vec), std::io::Error> { let (transaction_receipts_parts, encoded_length) = - Self::encode_transaction_receipts(rs, transactions, prev_outgoing_receipts)?; - - let mut content = EncodedShardChunkBody { parts: transaction_receipts_parts }; - content.reconstruct(rs).unwrap(); + rs.encode(TransactionReceipt(transactions, prev_outgoing_receipts.to_vec())); + let content = EncodedShardChunkBody { parts: transaction_receipts_parts }; let (encoded_merkle_root, merkle_paths) = content.get_merkle_hash_and_paths(); let block_header_v3_version = Some(ProtocolFeature::BlockHeaderV3.protocol_version()); @@ -1069,7 +1007,7 @@ impl EncodedShardChunk { prev_state_root, prev_outcome_root, encoded_merkle_root, - encoded_length, + encoded_length as u64, height, shard_id, prev_gas_used, @@ -1092,7 +1030,7 @@ impl EncodedShardChunk { prev_state_root, prev_outcome_root, encoded_merkle_root, - encoded_length, + encoded_length as u64, height, shard_id, prev_gas_used, @@ -1111,7 +1049,7 @@ impl EncodedShardChunk { prev_state_root, prev_outcome_root, encoded_merkle_root, - encoded_length, + encoded_length as u64, height, shard_id, prev_gas_used, @@ -1203,17 +1141,9 @@ impl EncodedShardChunk { shard_id = self.cloned_header().shard_id(), chunk_hash = ?self.chunk_hash()) .entered(); - let parts = match self { 
- Self::V1(chunk) => &chunk.content.parts[0..data_parts], - Self::V2(chunk) => &chunk.content.parts[0..data_parts], - }; - let encoded_length = match self { - Self::V1(chunk) => chunk.header.inner.encoded_length, - Self::V2(chunk) => chunk.header.encoded_length(), - }; - - let transaction_receipts = Self::decode_transaction_receipts(parts, encoded_length)?; + let transaction_receipts = + Self::decode_transaction_receipts(&self.content().parts, self.encoded_length())?; match self { Self::V1(chunk) => Ok(ShardChunk::V1(ShardChunkV1 { chunk_hash: chunk.header.chunk_hash(), @@ -1231,45 +1161,3 @@ impl EncodedShardChunk { } } } - -/// The ttl for a reed solomon instance to control memory usage. This number below corresponds to -/// roughly 60MB of memory usage. -const RS_TTL: u64 = 2 * 1024; - -/// Wrapper around reed solomon which occasionally resets the underlying -/// reed solomon instead to work around the memory leak in reed solomon -/// implementation -pub struct ReedSolomonWrapper { - rs: ReedSolomon, - ttl: u64, -} - -impl ReedSolomonWrapper { - pub fn new(data_shards: usize, parity_shards: usize) -> Self { - ReedSolomonWrapper { - rs: ReedSolomon::new(data_shards, parity_shards).unwrap(), - ttl: RS_TTL, - } - } - - pub fn reconstruct>( - &mut self, - slices: &mut [T], - ) -> Result<(), reed_solomon_erasure::Error> { - let res = self.rs.reconstruct(slices); - self.ttl -= 1; - if self.ttl == 0 { - *self = - ReedSolomonWrapper::new(self.rs.data_shard_count(), self.rs.parity_shard_count()); - } - res - } - - pub fn data_shard_count(&self) -> usize { - self.rs.data_shard_count() - } - - pub fn total_shard_count(&self) -> usize { - self.rs.total_shard_count() - } -} diff --git a/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap b/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap index e5c7866f1d1..3aa7a8d288c 100644 --- a/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap +++ b/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap @@ -39,9 +39,9 @@ expression: "&view" "execution": 64572944 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 200000000000, + "send_not_sir": 200000000000, + "execution": 780000000000 }, "function_call_cost_per_byte": { "send_sir": 2235934, @@ -109,7 +109,7 @@ expression: "&view" "ext_costs": { "base": 264768111, "contract_loading_base": 35445963, - "contract_loading_bytes": 216750, + "contract_loading_bytes": 1089295, "read_memory_base": 2609863200, "read_memory_byte": 3801333, "write_memory_base": 2803794861, diff --git a/core/primitives/src/stateless_validation.rs b/core/primitives/src/stateless_validation.rs index 3dfc45cd944..93e06958651 100644 --- a/core/primitives/src/stateless_validation.rs +++ b/core/primitives/src/stateless_validation.rs @@ -3,8 +3,10 @@ use std::collections::{HashMap, HashSet}; use crate::challenge::PartialState; use crate::sharding::{ChunkHash, ReceiptProof, ShardChunkHeader, ShardChunkHeaderV3}; use crate::transaction::SignedTransaction; +use crate::types::EpochId; use crate::validator_signer::{EmptyValidatorSigner, ValidatorSigner}; use borsh::{BorshDeserialize, BorshSerialize}; +use bytes::BufMut; use near_crypto::{PublicKey, Signature}; use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::{AccountId, Balance, BlockHeight, ShardId}; @@ -17,10 +19,52 @@ use 
near_primitives_core::types::{AccountId, Balance, BlockHeight, ShardId};

 /// This is a messy workaround until we know what to do with NEP 483.
 type SignatureDifferentiator = String;

-/// Signable
+/// Represents the bytes of an encoded ChunkStateWitness.
+/// The encoding is borsh serialization of the witness, compressed with zstd.
 #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
-pub struct ChunkStateWitness {
-    pub inner: ChunkStateWitnessInner,
+pub struct EncodedChunkStateWitness(Box<[u8]>);
+
+pub type ChunkStateWitnessSize = usize;
+
+impl EncodedChunkStateWitness {
+    /// Borsh-serialize and compress the state witness.
+    /// Returns the encoded witness along with the raw (uncompressed) witness size.
+    pub fn encode(witness: &ChunkStateWitness) -> std::io::Result<(Self, ChunkStateWitnessSize)> {
+        const STATE_WITNESS_COMPRESSION_LEVEL: i32 = 3;
+        let borsh_bytes = borsh::to_vec(witness)?;
+        Ok((
+            Self(zstd::encode_all(borsh_bytes.as_slice(), STATE_WITNESS_COMPRESSION_LEVEL)?.into()),
+            borsh_bytes.len(),
+        ))
+    }
+
+    /// Decompress and borsh-deserialize the encoded witness bytes.
+    /// Returns the decoded witness along with the raw (uncompressed) witness size.
+    pub fn decode(&self) -> std::io::Result<(ChunkStateWitness, ChunkStateWitnessSize)> {
+        // We want to limit the size of decompressed data to address a "zip bomb" attack.
+        // The value here is the same as NETWORK_MESSAGE_MAX_SIZE_BYTES.
+        const MAX_WITNESS_SIZE: usize = 512 * bytesize::MIB as usize;
+        let borsh_bytes = decompress_with_limit(self.0.as_ref(), MAX_WITNESS_SIZE)?;
+        let witness = ChunkStateWitness::try_from_slice(&borsh_bytes)?;
+        Ok((witness, borsh_bytes.len()))
+    }
+
+    pub fn size_bytes(&self) -> ChunkStateWitnessSize {
+        self.0.len()
+    }
+
+    pub fn as_slice(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
+pub struct SignedEncodedChunkStateWitness {
+    /// The content of the witness. It is convenient to have it as bytes in order
+    /// to perform signature verification along with decoding.
+    pub witness_bytes: EncodedChunkStateWitness,
+    /// Signature over `witness_bytes.as_slice()`, signed by the chunk producer.
     pub signature: Signature,
 }

@@ -39,15 +83,22 @@ pub struct ChunkStateWitnessAck {
 }

 impl ChunkStateWitnessAck {
-    pub fn new(witness_to_ack: &ChunkStateWitness) -> Self {
-        Self { chunk_hash: witness_to_ack.inner.chunk_header.chunk_hash() }
+    pub fn new(witness: &ChunkStateWitness) -> Self {
+        Self { chunk_hash: witness.chunk_header.chunk_hash() }
     }
 }

 /// The state witness for a chunk; proves the state transition that the
 /// chunk attests to.
 #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
-pub struct ChunkStateWitnessInner {
+pub struct ChunkStateWitness {
+    pub chunk_producer: AccountId,
+    /// EpochId corresponds to the next block after the chunk's previous block.
+    /// This is effectively the output of EpochManager::get_epoch_id_from_prev_block
+    /// with chunk_header.prev_block_hash().
+    /// This is needed to validate the signature when the previous block is not yet
+    /// available on the validator side (aka orphan state witness).
+    pub epoch_id: EpochId,
     /// The chunk header that this witness is for. While this is not needed
     /// to apply the state transition, it is needed for a chunk validator to
     /// produce a chunk endorsement while knowing what they are endorsing.
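A sketch of how these pieces fit together (illustrative, not code from this change; `witness` is a `ChunkStateWitness`, `signer` is any `ValidatorSigner`, and error handling is elided):

    let (witness_bytes, _raw_size) = EncodedChunkStateWitness::encode(&witness)?;
    let signed = SignedEncodedChunkStateWitness {
        signature: signer.sign_chunk_state_witness(&witness_bytes),
        witness_bytes,
    };
    // Receiver side: verify the signature against the compressed bytes first,
    // then pay for decompression and deserialization via decode().
    let (witness, _raw_size) = signed.witness_bytes.decode()?;

Signing the compressed bytes, rather than the decoded witness, lets a validator reject a bad signature before doing any decompression work.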
@@ -113,8 +164,10 @@ pub struct ChunkStateWitnessInner { signature_differentiator: SignatureDifferentiator, } -impl ChunkStateWitnessInner { +impl ChunkStateWitness { pub fn new( + chunk_producer: AccountId, + epoch_id: EpochId, chunk_header: ShardChunkHeader, main_state_transition: ChunkStateTransition, source_receipt_proofs: HashMap, @@ -125,6 +178,8 @@ impl ChunkStateWitnessInner { new_transactions_validation_state: PartialState, ) -> Self { Self { + chunk_producer, + epoch_id, chunk_header, main_state_transition, source_receipt_proofs, @@ -136,15 +191,8 @@ impl ChunkStateWitnessInner { signature_differentiator: "ChunkStateWitness".to_owned(), } } -} -impl ChunkStateWitness { - // Make a new dummy ChunkStateWitness for testing. - pub fn new_dummy( - height: BlockHeight, - shard_id: ShardId, - prev_block_hash: CryptoHash, - ) -> ChunkStateWitness { + pub fn new_dummy(height: BlockHeight, shard_id: ShardId, prev_block_hash: CryptoHash) -> Self { let header = ShardChunkHeader::V3(ShardChunkHeaderV3::new( prev_block_hash, Default::default(), @@ -161,7 +209,9 @@ impl ChunkStateWitness { Default::default(), &EmptyValidatorSigner::default(), )); - let inner = ChunkStateWitnessInner::new( + Self::new( + "alice.near".parse().unwrap(), + EpochId::default(), header, Default::default(), Default::default(), @@ -170,8 +220,7 @@ impl ChunkStateWitness { Default::default(), Default::default(), Default::default(), - ); - ChunkStateWitness { inner, signature: Signature::default() } + ) } } @@ -322,3 +371,57 @@ impl ChunkValidatorAssignments { } } } + +fn decompress_with_limit(data: &[u8], limit: usize) -> std::io::Result> { + let mut buf = Vec::new().limit(limit).writer(); + match zstd::stream::copy_decode(data, &mut buf) { + Err(err) => { + // If decompressed data exceeds the limit then the following error is returned: + // Error { kind: WriteZero, message: "failed to write whole buffer" } + // Here we convert it to a more descriptive error to make debugging easier. 
+ let err = if err.kind() == std::io::ErrorKind::WriteZero { + std::io::Error::other(format!( + "Decompressed data exceeded limit of {limit} bytes: {err}" + )) + } else { + err + }; + Err(err) + } + Ok(()) => Ok(buf.into_inner().into_inner()), + } +} + +#[cfg(test)] +mod tests { + use crate::stateless_validation::decompress_with_limit; + + #[test] + fn decompress_within_limit() { + let data = vec![1, 2, 3]; + let compressed = zstd::encode_all(data.as_slice(), 0).unwrap(); + let decompressed = decompress_with_limit(&compressed, 100); + assert!(decompressed.is_ok()); + assert_eq!(data, decompressed.unwrap()); + } + + #[test] + fn decompress_exceed_limit() { + let data = vec![0; 100]; + let compressed = zstd::encode_all(data.as_slice(), 0).unwrap(); + let decompress_res = decompress_with_limit(&compressed, 99); + assert!(decompress_res.is_err()); + assert_eq!( + decompress_res.unwrap_err().to_string(), + "Decompressed data exceeded limit of 99 bytes: failed to write whole buffer" + ); + } + + #[test] + fn decompress_invalid_data() { + let data = vec![0; 10]; + let decompress_res = decompress_with_limit(&data, 100); + assert!(decompress_res.is_err()); + assert_eq!(decompress_res.unwrap_err().to_string(), "Unknown frame descriptor"); + } +} diff --git a/core/primitives/src/test_utils.rs b/core/primitives/src/test_utils.rs index 091fac5fa5b..468657b5b59 100644 --- a/core/primitives/src/test_utils.rs +++ b/core/primitives/src/test_utils.rs @@ -621,6 +621,10 @@ impl EpochInfoProvider for MockEpochInfoProvider { fn minimum_stake(&self, _prev_block_hash: &CryptoHash) -> Result { Ok(0) } + + fn chain_id(&self) -> String { + "localnet".into() + } } /// Encode array of `u64` to be passed as a smart contract argument. diff --git a/core/primitives/src/trie_key.rs b/core/primitives/src/trie_key.rs index 8ff071401a6..648103e862a 100644 --- a/core/primitives/src/trie_key.rs +++ b/core/primitives/src/trie_key.rs @@ -695,7 +695,7 @@ mod tests { } #[test] - fn test_key_for_yielded_promise_consistency() { + fn test_key_for_promise_yield_consistency() { let key = TrieKey::PromiseYieldIndices; let raw_key = key.to_vec(); assert!(trie_key_parsers::parse_account_id_from_raw_key(&raw_key).unwrap().is_none()); diff --git a/core/primitives/src/types.rs b/core/primitives/src/types.rs index 791adef8c13..2481844aad0 100644 --- a/core/primitives/src/types.rs +++ b/core/primitives/src/types.rs @@ -2,7 +2,7 @@ use crate::account::{AccessKey, Account}; use crate::challenge::ChallengesResult; use crate::errors::EpochError; use crate::hash::CryptoHash; -use crate::receipt::Receipt; +use crate::receipt::{PromiseYieldTimeout, Receipt}; use crate::serialize::dec_format; use crate::trie_key::TrieKey; use borsh::{BorshDeserialize, BorshSerialize}; @@ -213,16 +213,18 @@ pub struct ConsolidatedStateChange { #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] pub struct StateChangesForResharding { pub changes: Vec, - // we need to store deleted receipts here because StateChanges will only include - // trie keys for removed values and account information can not be inferred from - // trie key for delayed receipts + // For DelayedReceipt and for PromiseYieldTimeout, account information is kept in the trie + // value rather than in the trie key. When such a key is erased, we need to know the erased + // value so that the change can be propagated to the correct child trie. 
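+    // (A DelayedReceipt trie key, for instance, is just a queue index; the receiver
+    // account id that decides which child shard gets the receipt lives in the value.)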
 pub processed_delayed_receipts: Vec<Receipt>,
+    pub processed_yield_timeouts: Vec<PromiseYieldTimeout>,
 }

 impl StateChangesForResharding {
     pub fn from_raw_state_changes(
         changes: &[RawStateChangesWithTrieKey],
         processed_delayed_receipts: Vec<Receipt>,
+        processed_yield_timeouts: Vec<PromiseYieldTimeout>,
     ) -> Self {
         let changes = changes
             .iter()
@@ -231,7 +233,7 @@ impl StateChangesForResharding {
                 ConsolidatedStateChange { trie_key: trie_key.clone(), value }
             })
             .collect();
-        Self { changes, processed_delayed_receipts }
+        Self { changes, processed_delayed_receipts, processed_yield_timeouts }
     }
 }

@@ -1012,6 +1014,9 @@ pub trait EpochInfoProvider {
     ) -> Result<Balance, EpochError>;

     fn minimum_stake(&self, prev_block_hash: &CryptoHash) -> Result<Balance, EpochError>;
+
+    /// Get the chain_id of the chain this epoch belongs to.
+    fn chain_id(&self) -> String;
 }

 /// Mode of the trie cache.
diff --git a/core/primitives/src/validator_signer.rs b/core/primitives/src/validator_signer.rs
index 2f3a5647df1..d876bbe84cd 100644
--- a/core/primitives/src/validator_signer.rs
+++ b/core/primitives/src/validator_signer.rs
@@ -8,7 +8,7 @@ use crate::challenge::ChallengeBody;
 use crate::hash::CryptoHash;
 use crate::network::{AnnounceAccount, PeerId};
 use crate::sharding::ChunkHash;
-use crate::stateless_validation::{ChunkEndorsementInner, ChunkStateWitnessInner};
+use crate::stateless_validation::{ChunkEndorsementInner, EncodedChunkStateWitness};
 use crate::telemetry::TelemetryInfo;
 use crate::types::{AccountId, BlockHeight, EpochId};

@@ -41,8 +41,7 @@ pub trait ValidatorSigner: Sync + Send {
     fn sign_chunk_endorsement(&self, inner: &ChunkEndorsementInner) -> Signature;

     /// Signs approval of the given chunk.
-    /// Returns signature and a signed payload size in bytes
-    fn sign_chunk_state_witness(&self, inner: &ChunkStateWitnessInner) -> (Signature, usize);
+    fn sign_chunk_state_witness(&self, witness_bytes: &EncodedChunkStateWitness) -> Signature;

     /// Signs challenge body.
     fn sign_challenge(&self, challenge_body: &ChallengeBody) -> (CryptoHash, Signature);
@@ -120,8 +119,8 @@ impl ValidatorSigner for EmptyValidatorSigner {
         Signature::default()
     }

-    fn sign_chunk_state_witness(&self, _inner: &ChunkStateWitnessInner) -> (Signature, usize) {
-        (Signature::default(), 0)
+    fn sign_chunk_state_witness(&self, _witness_bytes: &EncodedChunkStateWitness) -> Signature {
+        Signature::default()
     }

     fn sign_challenge(&self, challenge_body: &ChallengeBody) -> (CryptoHash, Signature) {
@@ -219,9 +218,8 @@ impl ValidatorSigner for InMemoryValidatorSigner {
         self.signer.sign(&borsh::to_vec(inner).unwrap())
     }

-    fn sign_chunk_state_witness(&self, inner: &ChunkStateWitnessInner) -> (Signature, usize) {
-        let data = borsh::to_vec(inner).unwrap();
-        (self.signer.sign(&data), data.len())
+    fn sign_chunk_state_witness(&self, witness_bytes: &EncodedChunkStateWitness) -> Signature {
+        self.signer.sign(witness_bytes.as_slice())
     }

     fn sign_challenge(&self, challenge_body: &ChallengeBody) -> (CryptoHash, Signature) {
diff --git a/core/primitives/src/version.rs b/core/primitives/src/version.rs
index 74d70f70751..58593f0feac 100644
--- a/core/primitives/src/version.rs
+++ b/core/primitives/src/version.rs
@@ -44,9 +44,13 @@ pub const DELETE_KEY_STORAGE_USAGE_PROTOCOL_VERSION: ProtocolVersion = 40;

 pub const SHARD_CHUNK_HEADER_UPGRADE_VERSION: ProtocolVersion = 41;

-/// Updates the way receipt ID is constructed to use current block hash instead of last block hash
+/// Updates the way receipt ID is constructed to use current block hash instead of last block hash.
pub const CREATE_RECEIPT_ID_SWITCH_TO_CURRENT_BLOCK_VERSION: ProtocolVersion = 42; +/// Pessimistic gas price estimation uses a fixed value of `minimum_new_receipt_gas` to stop being +/// tied to the function call base cost. +pub const FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION: ProtocolVersion = 66; + /// The points in time after which the voting for the latest protocol version /// should start. /// diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs index 900f2445bdb..e365a19d461 100644 --- a/core/primitives/src/views.rs +++ b/core/primitives/src/views.rs @@ -726,6 +726,8 @@ pub struct StatusResponse { pub node_key: Option, /// Uptime of the node. pub uptime_sec: i64, + /// Genesis hash of the chain. + pub genesis_hash: CryptoHash, /// Information about last blocks, network, epoch and chain & chunk info. #[serde(skip_serializing_if = "Option::is_none")] pub detailed_debug_status: Option, @@ -1714,18 +1716,18 @@ pub enum TxExecutionStatus { /// Transaction is included into the block. The block may be not finalised yet Included, /// Transaction is included into the block + - /// All the transaction receipts finished their execution. + /// All non-refund transaction receipts finished their execution. /// The corresponding blocks for tx and each receipt may be not finalised yet #[default] ExecutedOptimistic, /// Transaction is included into finalised block IncludedFinal, /// Transaction is included into finalised block + - /// All the transaction receipts finished their execution. + /// All non-refund transaction receipts finished their execution. /// The corresponding blocks for each receipt may be not finalised yet Executed, /// Transaction is included into finalised block + - /// Execution of transaction receipts is finalised + /// Execution of all transaction receipts is finalised, including refund receipts Final, } @@ -1756,7 +1758,7 @@ impl TxStatusView { } } -/// Execution outcome of the transaction and all of subsequent the receipts. +/// Execution outcome of the transaction and all the subsequent receipts. /// Could be not finalised yet #[derive( BorshSerialize, BorshDeserialize, serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone, @@ -1933,16 +1935,25 @@ pub enum ReceiptEnumView { output_data_receivers: Vec, input_data_ids: Vec, actions: Vec, + #[serde(default = "default_is_promise")] is_promise_yield: bool, }, Data { data_id: CryptoHash, #[serde_as(as = "Option")] data: Option>, + #[serde(default = "default_is_promise")] is_promise_resume: bool, }, } +// Default value used when deserializing ReceiptEnumViews which are missing either the +// `is_promise_yield` or `is_promise_resume` fields. Data which is missing this field was +// serialized before the introduction of yield execution. 
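+// For example, a `ReceiptEnumView` value whose JSON predates yield execution and
+// therefore lacks the `is_promise_yield` / `is_promise_resume` field still
+// deserializes, with the missing field defaulting to `false`.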
+fn default_is_promise() -> bool { + false +} + impl From for ReceiptView { fn from(receipt: Receipt) -> Self { let is_promise_yield = matches!(&receipt.receipt, ReceiptEnum::PromiseYield(_)); diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index 4f0cceeebfe..4d4d4c8a311 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -76,7 +76,6 @@ io_trace = [] no_cache = [] single_thread_rocksdb = [] # Deactivate RocksDB IO background threads test_features = [] -serialize_all_state_changes = [] new_epoch_sync = [] yield_resume = [] diff --git a/core/store/benches/finalize_bench.rs b/core/store/benches/finalize_bench.rs index ea31892d9cc..f8f6eb0772d 100644 --- a/core/store/benches/finalize_bench.rs +++ b/core/store/benches/finalize_bench.rs @@ -22,11 +22,12 @@ use near_crypto::{InMemorySigner, KeyType, Signer}; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{merklize, MerklePathItem}; use near_primitives::receipt::{ActionReceipt, DataReceipt, Receipt, ReceiptEnum}; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::shard_layout::ShardLayout; use near_primitives::sharding::{ ChunkHash, EncodedShardChunk, PartialEncodedChunk, PartialEncodedChunkPart, - PartialEncodedChunkV2, ReceiptProof, ReedSolomonWrapper, ShardChunk, ShardChunkHeader, - ShardChunkHeaderV3, ShardChunkV2, ShardProof, + PartialEncodedChunkV2, ReceiptProof, ShardChunk, ShardChunkHeader, ShardChunkHeaderV3, + ShardChunkV2, ShardProof, }; use near_primitives::transaction::{Action, FunctionCallAction, SignedTransaction}; use near_primitives::types::AccountId; diff --git a/core/store/src/config.rs b/core/store/src/config.rs index e4d97a9e050..78f060b0f74 100644 --- a/core/store/src/config.rs +++ b/core/store/src/config.rs @@ -52,6 +52,7 @@ pub struct StoreConfig { /// Enable fetching account and access key data ahead of time to avoid IO latency. pub enable_receipt_prefetching: bool, + /// TODO: use `PrefetchConfig` for SWEAT prefetching. /// Configured accounts will be prefetched as SWEAT token account, if predecessor is listed as receiver. /// This config option is temporary and will be removed once flat storage is implemented. pub sweat_prefetch_receivers: Vec, @@ -59,6 +60,9 @@ pub struct StoreConfig { /// This config option is temporary and will be removed once flat storage is implemented. pub sweat_prefetch_senders: Vec, + pub claim_sweat_prefetch_config: Vec, + pub kaiching_prefetch_config: Vec, + /// List of shard UIDs for which we should load the tries in memory. /// TODO(#9511): This does not automatically survive resharding. We may need to figure out a /// strategy for that. @@ -251,6 +255,23 @@ impl Default for StoreConfig { "oracle.sweat".to_owned(), "sweat_the_oracle.testnet".to_owned(), ], + claim_sweat_prefetch_config: vec![ + PrefetchConfig { + receiver: "claim.sweat".to_owned(), + sender: "token.sweat".to_owned(), + method_name: "record_batch_for_hold".to_owned(), + }, + PrefetchConfig { + receiver: "claim.sweat".to_owned(), + sender: String::new(), + method_name: "claim".to_owned(), + }, + ], + kaiching_prefetch_config: vec![PrefetchConfig { + receiver: "earn.kaiching".to_owned(), + sender: "wallet.kaiching".to_owned(), + method_name: "ft_on_transfer".to_owned(), + }], // TODO(#9511): Consider adding here shard id 3 or all shards after // this feature will be tested. Until that, use at your own risk. @@ -330,3 +351,15 @@ impl Default for TrieCacheConfig { } } } + +/// Parameters for prefetching certain contract calls. 
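+///
+/// For example, the defaults above prefetch state for `record_batch_for_hold`
+/// calls sent from `token.sweat` to `claim.sweat`; judging by the `claim` entry,
+/// an empty `sender` appears to match any sender.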
+#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
+#[serde(default)]
+pub struct PrefetchConfig {
+    /// Receipt receiver, or contract account id.
+    pub receiver: String,
+    /// Receipt sender.
+    pub sender: String,
+    /// Contract method name.
+    pub method_name: String,
+}
diff --git a/core/store/src/db.rs b/core/store/src/db.rs
index cb92d99abda..60527ee5b5d 100644
--- a/core/store/src/db.rs
+++ b/core/store/src/db.rs
@@ -5,6 +5,7 @@ use std::io;
 pub(crate) mod rocksdb;
 mod colddb;
+mod mixeddb;
 mod splitdb;
 pub mod refcount;
@@ -14,6 +15,7 @@ mod testdb;
 mod database_tests;
 pub use self::colddb::ColdDB;
+pub use self::mixeddb::{MixedDB, ReadOrder};
 pub use self::rocksdb::RocksDB;
 pub use self::splitdb::SplitDB;
@@ -73,6 +75,17 @@ impl DBOp {
             DBOp::DeleteRange { col, .. } => col,
         }
     }
+
+    pub fn bytes(&self) -> usize {
+        match self {
+            DBOp::Set { key, value, .. } => key.len() + value.len(),
+            DBOp::Insert { key, value, .. } => key.len() + value.len(),
+            DBOp::UpdateRefcount { key, value, .. } => key.len() + value.len(),
+            DBOp::Delete { key, .. } => key.len(),
+            DBOp::DeleteAll { .. } => 0,
+            DBOp::DeleteRange { from, to, .. } => from.len() + to.len(),
+        }
+    }
 }

 impl std::fmt::Debug for DBOp {
diff --git a/core/store/src/db/colddb.rs b/core/store/src/db/colddb.rs
index 757c4bcd63e..c69a6ce7e7e 100644
--- a/core/store/src/db/colddb.rs
+++ b/core/store/src/db/colddb.rs
@@ -150,10 +150,13 @@ fn adjust_op(op: &mut DBOp) -> bool {
                 }
             };
         }
-        DBOp::Delete { col, key } => {
-            log_assert_fail!("Unexpected delete from {col} in cold store: {key:?}");
-            false
-        }
+        DBOp::Delete { col, key } => match col {
+            DBCol::BlockMisc => true,
+            _ => {
+                log_assert_fail!("Unexpected delete from {col} in cold store: {key:?}");
+                false
+            }
+        },
         DBOp::DeleteAll { col } => {
             log_assert_fail!("Unexpected delete from {col} in cold store");
             false
diff --git a/core/store/src/db/mixeddb.rs b/core/store/src/db/mixeddb.rs
new file mode 100644
index 00000000000..244c2544a78
--- /dev/null
+++ b/core/store/src/db/mixeddb.rs
@@ -0,0 +1,141 @@
+use std::io;
+use std::sync::Arc;
+
+use crate::db::{DBIterator, DBSlice, DBTransaction, Database, StoreStatistics};
+use crate::DBCol;
+
+#[allow(dead_code)]
+#[derive(Clone, Copy, Debug)]
+pub enum ReadOrder {
+    ReadDBFirst,
+    WriteDBFirst,
+}
+
+/// MixedDB allows having a dedicated read-only DB and specifying the order of data retrieval.
+/// With `ReadOrder::ReadDBFirst` you can overwrite some information in the DB without actually modifying it.
+/// With `ReadOrder::WriteDBFirst` you can record the results of any operations in a separate DB only.
+/// This way you can conduct several experiments on the data without having to create checkpoints or restore data,
+/// and you can compare data from different experiments faster, as your write DB will be smaller.
+///
+/// SplitDB can be considered a `MixedDB { read_db: cold_db, write_db: hot_db, read_order: ReadOrder::WriteDBFirst }`,
+/// but it also has several assertions about the types of columns we can retrieve from `read_db`,
+/// and it is suitable for production.
+///
+/// MixedDB is designed to be used with neard tools and is not planned for integration into production.
+pub struct MixedDB {
+    /// Read-only DB.
+    read_db: Arc<dyn Database>,
+    /// DB for writes.
+    write_db: Arc<dyn Database>,
+    /// Order of data lookup.
+    read_order: ReadOrder,
+}
+
+impl MixedDB {
+    #[allow(dead_code)]
+    pub fn new(
+        read_db: Arc<dyn Database>,
+        write_db: Arc<dyn Database>,
+        read_order: ReadOrder,
+    ) -> Arc<Self> {
+        return Arc::new(MixedDB { read_db, write_db, read_order });
+    }
+
+    /// Return the first DB in the order of data lookup.
+    fn first_db(&self) -> &Arc<dyn Database> {
+        match self.read_order {
+            ReadOrder::ReadDBFirst => &self.read_db,
+            ReadOrder::WriteDBFirst => &self.write_db,
+        }
+    }
+
+    /// Return the second DB in the order of data lookup.
+    fn second_db(&self) -> &Arc<dyn Database> {
+        match self.read_order {
+            ReadOrder::ReadDBFirst => &self.write_db,
+            ReadOrder::WriteDBFirst => &self.read_db,
+        }
+    }
+
+    /// This function is imported from SplitDB, but we may want to refactor this later.
+    fn merge_iter<'a>(a: DBIterator<'a>, b: DBIterator<'a>) -> DBIterator<'a> {
+        crate::db::SplitDB::merge_iter(a, b)
+    }
+}
+
+impl Database for MixedDB {
+    fn get_raw_bytes(&self, col: DBCol, key: &[u8]) -> io::Result<Option<DBSlice<'_>>> {
+        if let Some(first_result) = self.first_db().get_raw_bytes(col, key)? {
+            return Ok(Some(first_result));
+        }
+        self.second_db().get_raw_bytes(col, key)
+    }
+
+    fn get_with_rc_stripped(&self, col: DBCol, key: &[u8]) -> io::Result<Option<DBSlice<'_>>> {
+        assert!(col.is_rc());
+
+        if let Some(first_result) = self.first_db().get_with_rc_stripped(col, key)? {
+            return Ok(Some(first_result));
+        }
+        self.second_db().get_with_rc_stripped(col, key)
+    }
+
+    fn iter<'a>(&'a self, col: DBCol) -> DBIterator<'a> {
+        Self::merge_iter(self.read_db.iter(col), self.write_db.iter(col))
+    }
+
+    fn iter_prefix<'a>(&'a self, col: DBCol, key_prefix: &'a [u8]) -> DBIterator<'a> {
+        return Self::merge_iter(
+            self.first_db().iter_prefix(col, key_prefix),
+            self.second_db().iter_prefix(col, key_prefix),
+        );
+    }
+
+    fn iter_range<'a>(
+        &'a self,
+        col: DBCol,
+        lower_bound: Option<&[u8]>,
+        upper_bound: Option<&[u8]>,
+    ) -> DBIterator<'a> {
+        return Self::merge_iter(
+            self.first_db().iter_range(col, lower_bound, upper_bound),
+            self.second_db().iter_range(col, lower_bound, upper_bound),
+        );
+    }
+
+    fn iter_raw_bytes<'a>(&'a self, col: DBCol) -> DBIterator<'a> {
+        return Self::merge_iter(
+            self.first_db().iter_raw_bytes(col),
+            self.second_db().iter_raw_bytes(col),
+        );
+    }
+
+    fn write(&self, batch: DBTransaction) -> io::Result<()> {
+        self.write_db.write(batch)
+    }
+
+    /// There is no need to flush a read-only DB.
+    fn flush(&self) -> io::Result<()> {
+        self.write_db.flush()
+    }
+
+    /// There is no need to compact an immutable DB.
+    fn compact(&self) -> io::Result<()> {
+        self.write_db.compact()
+    }
+
+    /// The write DB actually has real changes,
+    /// so exporting its statistics is a reasonable request.
+    fn get_store_statistics(&self) -> Option<StoreStatistics> {
+        self.write_db.get_store_statistics()
+    }
+
+    /// There is no need to create a checkpoint of an immutable DB.
+    fn create_checkpoint(
+        &self,
+        path: &std::path::Path,
+        columns_to_keep: Option<&[DBCol]>,
+    ) -> anyhow::Result<()> {
+        self.write_db.create_checkpoint(path, columns_to_keep)
+    }
+}
diff --git a/core/store/src/db/refcount.rs b/core/store/src/db/refcount.rs
index c7844220003..0ecef4e7bb2 100644
--- a/core/store/src/db/refcount.rs
+++ b/core/store/src/db/refcount.rs
@@ -97,8 +97,8 @@ pub(crate) fn add_positive_refcount(data: &[u8], rc: std::num::NonZeroU32) -> Vec<u8>
 /// Returns empty value with encoded negative reference count.
 ///
 /// `rc` gives the absolute value of the reference count.
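 ///
 /// For example (an illustrative check, assuming the `[u8; 8]` return type
 /// introduced below): a decrement by 2 is encoded as the little-endian bytes
 /// of `-2i64`:
 ///
 /// ```ignore
 /// let rc = std::num::NonZeroU32::new(2).unwrap();
 /// assert_eq!(encode_negative_refcount(rc), (-2i64).to_le_bytes());
 /// ```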
-pub(crate) fn encode_negative_refcount(rc: std::num::NonZeroU32) -> Vec { - (-i64::from(rc.get())).to_le_bytes().to_vec() +pub(crate) fn encode_negative_refcount(rc: std::num::NonZeroU32) -> [u8; 8] { + (-i64::from(rc.get())).to_le_bytes() } /// Merge reference counted values together. diff --git a/core/store/src/db/rocksdb.rs b/core/store/src/db/rocksdb.rs index d3c83a1fb0a..aa37575d457 100644 --- a/core/store/src/db/rocksdb.rs +++ b/core/store/src/db/rocksdb.rs @@ -293,10 +293,55 @@ impl RocksDB { pub fn compact_column(&self, col: DBCol) -> io::Result<()> { let none = Option::<&[u8]>::None; - tracing::info!(target: "db", column = %col, "Compact column"); + tracing::info!(target: "store::db::rocksdb", col = %col, "RocksDB::compact_column"); self.db.compact_range_cf(self.cf_handle(col)?, none, none); Ok(()) } + + #[tracing::instrument( + target = "store::db::rocksdb", + level = "trace", + "RocksDB::build_write_batch", + skip_all, + fields(transaction.ops.len = transaction.ops.len()), + )] + fn build_write_batch(&self, transaction: DBTransaction) -> io::Result { + let mut batch = WriteBatch::default(); + for op in transaction.ops { + match op { + DBOp::Set { col, key, value } => { + batch.put_cf(self.cf_handle(col)?, key, value); + } + DBOp::Insert { col, key, value } => { + if cfg!(debug_assertions) { + if let Ok(Some(old_value)) = self.get_raw_bytes(col, &key) { + super::assert_no_overwrite(col, &key, &value, &*old_value) + } + } + batch.put_cf(self.cf_handle(col)?, key, value); + } + DBOp::UpdateRefcount { col, key, value } => { + batch.merge_cf(self.cf_handle(col)?, key, value); + } + DBOp::Delete { col, key } => { + batch.delete_cf(self.cf_handle(col)?, key); + } + DBOp::DeleteAll { col } => { + let cf_handle = self.cf_handle(col)?; + let range = self.get_cf_key_range(cf_handle).map_err(io::Error::other)?; + if let Some(range) = range { + batch.delete_range_cf(cf_handle, range.start(), range.end()); + // delete_range_cf deletes ["begin_key", "end_key"), so need one more delete + batch.delete_cf(cf_handle, range.end()) + } + } + DBOp::DeleteRange { col, from, to } => { + batch.delete_range_cf(self.cf_handle(col)?, from, to); + } + } + } + Ok(batch) + } } impl Database for RocksDB { @@ -336,44 +381,33 @@ impl Database for RocksDB { refcount::iter_with_rc_logic(col, iter) } + #[tracing::instrument( + target = "store::db::rocksdb", + level = "trace", + "RocksDB::write", + skip_all + )] fn write(&self, transaction: DBTransaction) -> io::Result<()> { - let mut batch = WriteBatch::default(); - for op in transaction.ops { - match op { - DBOp::Set { col, key, value } => { - batch.put_cf(self.cf_handle(col)?, key, value); - } - DBOp::Insert { col, key, value } => { - if cfg!(debug_assertions) { - if let Ok(Some(old_value)) = self.get_raw_bytes(col, &key) { - super::assert_no_overwrite(col, &key, &value, &*old_value) - } - } - batch.put_cf(self.cf_handle(col)?, key, value); - } - DBOp::UpdateRefcount { col, key, value } => { - batch.merge_cf(self.cf_handle(col)?, key, value); - } - DBOp::Delete { col, key } => { - batch.delete_cf(self.cf_handle(col)?, key); - } - DBOp::DeleteAll { col } => { - let cf_handle = self.cf_handle(col)?; - let range = self.get_cf_key_range(cf_handle).map_err(io::Error::other)?; - if let Some(range) = range { - batch.delete_range_cf(cf_handle, range.start(), range.end()); - // delete_range_cf deletes ["begin_key", "end_key"), so need one more delete - batch.delete_cf(cf_handle, range.end()) - } - } - DBOp::DeleteRange { col, from, to } => { - 
batch.delete_range_cf(self.cf_handle(col)?, from, to); - } - } + let write_batch_start = std::time::Instant::now(); + let batch = self.build_write_batch(transaction)?; + let elapsed = write_batch_start.elapsed(); + if elapsed.as_secs_f32() > 0.15 { + tracing::warn!( + target = "store::db::rocksdb", + message = "making a write batch took a very long time, make smaller transactions!", + ?elapsed, + backtrace = %std::backtrace::Backtrace::force_capture() + ); } self.db.write(batch).map_err(io::Error::other) } + #[tracing::instrument( + target = "store::db::rocksdb", + level = "info", + "RocksDB::compact", + skip_all + )] fn compact(&self) -> io::Result<()> { for col in DBCol::iter() { self.compact_column(col)?; @@ -381,6 +415,12 @@ impl Database for RocksDB { Ok(()) } + #[tracing::instrument( + target = "store::db::rocksdb", + level = "debug", + "RocksDB::flush", + skip_all + )] fn flush(&self) -> io::Result<()> { // Need to iterator over all CFs because the normal `flush()` only // flushes the default column family. @@ -408,13 +448,18 @@ impl Database for RocksDB { } } + #[tracing::instrument( + target = "store::db::rocksdb", + level = "debug", + "RocksDB::create_checkpoint", + skip_all, + fields(path = %path.display()), + )] fn create_checkpoint( &self, path: &std::path::Path, columns_to_keep: Option<&[DBCol]>, ) -> anyhow::Result<()> { - let _span = - tracing::info_span!(target: "state_snapshot", "create_checkpoint", ?path).entered(); let cp = ::rocksdb::checkpoint::Checkpoint::new(&self.db)?; cp.create_checkpoint(path) .with_context(|| format!("failed to create checkpoint at {}", path.display()))?; @@ -433,7 +478,7 @@ impl Database for RocksDB { // We need to keep DbVersion because it's expected to be there when // we check the metadata in DBOpener::get_metadata() tracing::debug!( - target: "store", + target: "store::db::rocksdb", "create_checkpoint called with columns to keep not including DBCol::DbVersion. Including it anyway." ); continue; diff --git a/core/store/src/db/splitdb.rs b/core/store/src/db/splitdb.rs index 46cdf469bdb..4586cf1b8b5 100644 --- a/core/store/src/db/splitdb.rs +++ b/core/store/src/db/splitdb.rs @@ -35,7 +35,7 @@ impl SplitDB { /// implements total order on values but always compares the error on the /// left as lesser. This isn't even partial order. It is fine for merging /// lists but should not be used for anything more complex like sorting. - fn db_iter_item_cmp(a: &DBIteratorItem, b: &DBIteratorItem) -> Ordering { + pub(crate) fn db_iter_item_cmp(a: &DBIteratorItem, b: &DBIteratorItem) -> Ordering { match (a, b) { // Always put errors first. (Err(_), _) => Ordering::Less, @@ -52,7 +52,7 @@ impl SplitDB { /// iterator will contain unique and sorted items from both input iterators. /// /// All errors from both inputs will be returned. - fn merge_iter<'a>(a: DBIterator<'a>, b: DBIterator<'a>) -> DBIterator<'a> { + pub(crate) fn merge_iter<'a>(a: DBIterator<'a>, b: DBIterator<'a>) -> DBIterator<'a> { // Merge the two iterators using the cmp function. The result will be an // iter of EitherOrBoth. 
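         // For example (illustrative): merging iterators over keys [a, c] and
         // [b, c] yields Left(a), Right(b), Both(c, c); keeping one item per
         // variant preserves the sort order and de-duplicates entries present
         // in both databases.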
        let iter = itertools::merge_join_by(a, b, Self::db_iter_item_cmp);
diff --git a/core/store/src/flat/store_helper.rs b/core/store/src/flat/store_helper.rs
index ea5b1c6f0da..aa3faac386a 100644
--- a/core/store/src/flat/store_helper.rs
+++ b/core/store/src/flat/store_helper.rs
@@ -178,7 +178,7 @@ pub fn set_flat_state_values_inlining_migration_status(
     })
 }

-pub(crate) fn get_flat_state_value(
+pub fn get_flat_state_value(
     store: &Store,
     shard_uid: ShardUId,
     key: &[u8],
diff --git a/core/store/src/lib.rs b/core/store/src/lib.rs
index abfd24d74c9..96ece091992 100644
--- a/core/store/src/lib.rs
+++ b/core/store/src/lib.rs
@@ -251,7 +251,7 @@ impl NodeStorage {
         Ok(match metadata::DbMetadata::read(self.hot_storage.as_ref())?.kind.unwrap() {
             metadata::DbKind::RPC => false,
             metadata::DbKind::Archive => true,
-            metadata::DbKind::Hot | metadata::DbKind::Cold => todo!(),
+            metadata::DbKind::Hot | metadata::DbKind::Cold => true,
         })
     }

@@ -360,6 +360,15 @@ impl Store {
     /// Loads state (`State` and `FlatState` columns) from given file.
     ///
     /// See [`Self::save_state_to_file`] for description of the file format.
+    #[tracing::instrument(
+        level = "info",
+        // FIXME: start moving things into tighter modules so that it's easier to selectively trace
+        // specific things.
+        target = "store",
+        "Store::load_state_from_file",
+        skip_all,
+        fields(filename = %filename.display())
+    )]
     pub fn load_state_from_file(&self, filename: &Path) -> io::Result<()> {
         let file = File::open(filename)?;
         let mut file = std::io::BufReader::new(file);
@@ -500,7 +509,7 @@ impl StoreUpdate {
     ) {
         assert!(column.is_rc(), "can't update refcount: {column}");
         let value = refcount::encode_negative_refcount(decrease);
-        self.transaction.update_refcount(column, key.to_vec(), value)
+        self.transaction.update_refcount(column, key.to_vec(), value.to_vec())
     }

     /// Same as `self.decrement_refcount_by(column, key, 1)`.
@@ -574,6 +583,24 @@ impl StoreUpdate {
         self.transaction.merge(other.transaction)
     }

+    #[tracing::instrument(
+        level = "trace",
+        target = "store::update",
+        // FIXME: start moving things into tighter modules so that it's easier to selectively trace
+        // specific things.
+ "StoreUpdate::commit", + skip_all, + fields( + transaction.ops.len = self.transaction.ops.len(), + total_bytes, + inserts, + sets, + rc_ops, + deletes, + delete_all_ops, + delete_range_ops + ) + )] pub fn commit(self) -> io::Result<()> { debug_assert!( { @@ -596,26 +623,76 @@ impl StoreUpdate { "Transaction overwrites itself: {:?}", self ); - let _span = tracing::trace_span!(target: "store", "commit").entered(); - for op in &self.transaction.ops { - match op { - DBOp::Insert { col, key, value } => { - tracing::trace!(target: "store", db_op = "insert", col = %col, key = %StorageKey(key), size = value.len(), value = %AbbrBytes(value),) - } - DBOp::Set { col, key, value } => { - tracing::trace!(target: "store", db_op = "set", col = %col, key = %StorageKey(key), size = value.len(), value = %AbbrBytes(value)) - } - DBOp::UpdateRefcount { col, key, value } => { - tracing::trace!(target: "store", db_op = "update_rc", col = %col, key = %StorageKey(key), size = value.len(), value = %AbbrBytes(value)) - } - DBOp::Delete { col, key } => { - tracing::trace!(target: "store", db_op = "delete", col = %col, key = %StorageKey(key)) - } - DBOp::DeleteAll { col } => { - tracing::trace!(target: "store", db_op = "delete_all", col = %col) - } - DBOp::DeleteRange { col, from, to } => { - tracing::trace!(target: "store", db_op = "delete_range", col = %col, from = %StorageKey(from), to = %StorageKey(to)) + let span = tracing::Span::current(); + if !span.is_disabled() { + let [mut insert_count, mut set_count, mut update_rc_count] = [0u64; 3]; + let [mut delete_count, mut delete_all_count, mut delete_range_count] = [0u64; 3]; + let mut total_bytes = 0; + for op in &self.transaction.ops { + total_bytes += op.bytes(); + let count = match op { + DBOp::Set { .. } => &mut set_count, + DBOp::Insert { .. } => &mut insert_count, + DBOp::UpdateRefcount { .. } => &mut update_rc_count, + DBOp::Delete { .. } => &mut delete_count, + DBOp::DeleteAll { .. } => &mut delete_all_count, + DBOp::DeleteRange { .. 
} => &mut delete_range_count, + }; + *count += 1; + } + span.record("inserts", insert_count); + span.record("sets", set_count); + span.record("rc_ops", update_rc_count); + span.record("deletes", delete_count); + span.record("delete_all_ops", delete_all_count); + span.record("delete_range_ops", delete_range_count); + span.record("total_bytes", total_bytes); + } + if tracing::event_enabled!(target: "store::update::transactions", tracing::Level::TRACE) { + for op in &self.transaction.ops { + match op { + DBOp::Insert { col, key, value } => tracing::trace!( + target: "store::update::transactions", + db_op = "insert", + %col, + key = %StorageKey(key), + size = value.len(), + value = %AbbrBytes(value), + ), + DBOp::Set { col, key, value } => tracing::trace!( + target: "store::update::transactions", + db_op = "set", + %col, + key = %StorageKey(key), + size = value.len(), + value = %AbbrBytes(value) + ), + DBOp::UpdateRefcount { col, key, value } => tracing::trace!( + target: "store::update::transactions", + db_op = "update_rc", + %col, + key = %StorageKey(key), + size = value.len(), + value = %AbbrBytes(value) + ), + DBOp::Delete { col, key } => tracing::trace!( + target: "store::update::transactions", + db_op = "delete", + %col, + key = %StorageKey(key) + ), + DBOp::DeleteAll { col } => tracing::trace!( + target: "store::update::transactions", + db_op = "delete_all", + %col + ), + DBOp::DeleteRange { col, from, to } => tracing::trace!( + target: "store::update::transactions", + db_op = "delete_range", + %col, + from = %StorageKey(from), + to = %StorageKey(to) + ), } } } diff --git a/core/store/src/opener.rs b/core/store/src/opener.rs index 1569f66d438..cdcdd81b573 100644 --- a/core/store/src/opener.rs +++ b/core/store/src/opener.rs @@ -127,7 +127,7 @@ impl From for StoreOpenerError { fn get_default_kind(archive: bool, temp: Temperature) -> DbKind { match (temp, archive) { (Temperature::Hot, false) => DbKind::RPC, - (Temperature::Hot, true) => DbKind::Archive, + (Temperature::Hot, true) => DbKind::Hot, (Temperature::Cold, _) => DbKind::Cold, } } diff --git a/core/store/src/test_utils.rs b/core/store/src/test_utils.rs index 5004b81c68a..3aab48d8d6f 100644 --- a/core/store/src/test_utils.rs +++ b/core/store/src/test_utils.rs @@ -4,13 +4,13 @@ use crate::flat::{ }; use crate::metadata::{DbKind, DbVersion, DB_VERSION}; use crate::{ - get, get_delayed_receipt_indices, DBCol, NodeStorage, ShardTries, StateSnapshotConfig, Store, - TrieConfig, + get, get_delayed_receipt_indices, get_promise_yield_indices, DBCol, NodeStorage, ShardTries, + StateSnapshotConfig, Store, TrieConfig, }; use itertools::Itertools; use near_primitives::account::id::AccountId; use near_primitives::hash::CryptoHash; -use near_primitives::receipt::{DataReceipt, Receipt, ReceiptEnum}; +use near_primitives::receipt::{DataReceipt, PromiseYieldTimeout, Receipt, ReceiptEnum}; use near_primitives::shard_layout::{ShardUId, ShardVersion}; use near_primitives::state::FlatStateValue; use near_primitives::trie_key::TrieKey; @@ -94,8 +94,8 @@ impl TestTriesBuilder { self } - pub fn with_flat_storage(mut self) -> Self { - self.enable_flat_storage = true; + pub fn with_flat_storage(mut self, enable: bool) -> Self { + self.enable_flat_storage = enable; self } @@ -275,6 +275,19 @@ pub fn gen_receipts(rng: &mut impl Rng, max_size: usize) -> Vec { .collect() } +pub fn gen_timeouts(rng: &mut impl Rng, max_size: usize) -> Vec { + let alphabet = gen_alphabet(); + let accounts = gen_accounts_from_alphabet(rng, 1, max_size, &alphabet); + accounts + 
+        .iter()
+        .map(|account_id| PromiseYieldTimeout {
+            account_id: account_id.clone(),
+            data_id: CryptoHash::default(),
+            expires_at: 0,
+        })
+        .collect()
+}
+
 /// Generates up to max_size random sequence of changes: both insertion and deletions.
 /// Deletions are represented as (key, None).
 /// Keys are randomly constructed from alphabet, and they have max_length size.
@@ -353,3 +366,21 @@ pub fn get_all_delayed_receipts(
     }
     receipts
 }
+
+pub fn get_all_promise_yield_timeouts(
+    tries: &ShardTries,
+    shard_uid: &ShardUId,
+    state_root: &StateRoot,
+) -> Vec<PromiseYieldTimeout> {
+    let state_update = &tries.new_trie_update(*shard_uid, *state_root);
+    let mut promise_yield_indices = get_promise_yield_indices(state_update).unwrap();
+
+    let mut timeouts = vec![];
+    while promise_yield_indices.first_index < promise_yield_indices.next_available_index {
+        let key = TrieKey::PromiseYieldTimeout { index: promise_yield_indices.first_index };
+        let timeout = get(state_update, &key).unwrap().unwrap();
+        promise_yield_indices.first_index += 1;
+        timeouts.push(timeout);
+    }
+    timeouts
+}
diff --git a/core/store/src/trie/config.rs b/core/store/src/trie/config.rs
index 0df26551840..aff4685deee 100644
--- a/core/store/src/trie/config.rs
+++ b/core/store/src/trie/config.rs
@@ -1,4 +1,4 @@
-use crate::config::TrieCacheConfig;
+use crate::config::{PrefetchConfig, TrieCacheConfig};
 use crate::StoreConfig;
 use near_primitives::shard_layout::ShardUId;
 use near_primitives::types::AccountId;
@@ -31,6 +31,9 @@ pub struct TrieConfig {
     pub sweat_prefetch_receivers: Vec<AccountId>,
     /// List of allowed predecessor accounts for SWEAT prefetching.
     pub sweat_prefetch_senders: Vec<AccountId>,
+    pub claim_sweat_prefetch_config: Vec<PrefetchConfig>,
+    pub kaiching_prefetch_config: Vec<PrefetchConfig>,
+
     /// List of shards we will load into memory.
     pub load_mem_tries_for_shards: Vec<ShardUId>,
     /// Whether mem-trie should be loaded for each tracked shard.
@@ -58,6 +61,9 @@ impl TrieConfig {
                 Err(e) => error!(target: "config", "invalid account id {account}: {e}"),
             }
         }
+        this.claim_sweat_prefetch_config = config.claim_sweat_prefetch_config.clone();
+        this.kaiching_prefetch_config = config.kaiching_prefetch_config.clone();
+
         this.load_mem_tries_for_shards = config.load_mem_tries_for_shards.clone();
         this.load_mem_tries_for_tracked_shards = config.load_mem_tries_for_tracked_shards;
@@ -79,4 +85,13 @@ impl TrieConfig {
     pub fn deletions_queue_capacity(&self) -> usize {
         self.shard_cache_config.shard_cache_deletions_queue_capacity
     }
+
+    /// Checks whether any prefetching-related config is enabled.
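+    ///
+    /// A minimal sketch of the semantics (assuming the `Default` impl used by
+    /// `from_store_config` above):
+    ///
+    /// ```ignore
+    /// let mut config = TrieConfig::default();
+    /// assert!(!config.prefetch_enabled());
+    /// config.enable_receipt_prefetching = true;
+    /// assert!(config.prefetch_enabled());
+    /// ```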
+    pub fn prefetch_enabled(&self) -> bool {
+        self.enable_receipt_prefetching
+            || (!self.sweat_prefetch_receivers.is_empty()
+                && !self.sweat_prefetch_senders.is_empty())
+            || !self.claim_sweat_prefetch_config.is_empty()
+            || !self.kaiching_prefetch_config.is_empty()
+    }
 }
diff --git a/core/store/src/trie/insert_delete.rs b/core/store/src/trie/insert_delete.rs
index 57c7fa05be7..af460c0ce88 100644
--- a/core/store/src/trie/insert_delete.rs
+++ b/core/store/src/trie/insert_delete.rs
@@ -320,6 +320,7 @@ impl Trie {
         let mut partial = partial;
         let root_node = handle;
         let mut path: Vec<StorageHandle> = Vec::new();
+        let mut key_deleted = true;
         loop {
             path.push(handle);
             let TrieNodeWithSize { node, memory_usage } = memory.destroy(handle);
@@ -327,6 +328,7 @@ impl Trie {
             match node {
                 TrieNode::Empty => {
                     memory.store_at(handle, TrieNodeWithSize::empty());
+                    key_deleted = false;
                     break;
                 }
                 TrieNode::Leaf(key, value) => {
@@ -338,25 +340,33 @@ impl Trie {
                         let leaf_node = TrieNode::Leaf(key, value);
                         let memory_usage = leaf_node.memory_usage_direct(memory);
                         memory.store_at(handle, TrieNodeWithSize::new(leaf_node, memory_usage));
+                        key_deleted = false;
                         break;
                     }
                 }
                 TrieNode::Branch(mut children, value) => {
                     if partial.is_empty() {
-                        if let Some(value) = &value {
-                            self.delete_value(memory, value)?;
-                        }
-                        if children.iter().count() == 0 {
-                            memory.store_at(handle, TrieNodeWithSize::empty());
-                        } else {
-                            Trie::calc_memory_usage_and_store(
-                                memory,
+                        if value.is_none() {
+                            // Key being deleted doesn't exist.
+                            memory.store_at(
                                 handle,
-                                children_memory_usage,
-                                TrieNode::Branch(children, None),
-                                None,
+                                TrieNodeWithSize::new(
+                                    TrieNode::Branch(children, value),
+                                    memory_usage,
+                                ),
                             );
+                            key_deleted = false;
+                            break;
                         }
+                        self.delete_value(memory, &value.unwrap())?;
+                        Trie::calc_memory_usage_and_store(
+                            memory,
+                            handle,
+                            children_memory_usage,
+                            TrieNode::Branch(children, None),
+                            None,
+                        );
+                        // If needed, the branch will be squashed at the end of the function.
                         break;
                     } else {
                         let child = &mut children[partial.at(0)];
@@ -386,6 +396,7 @@ impl Trie {
                                 memory_usage,
                             ),
                         );
+                        key_deleted = false;
                         break;
                     }
                 }
@@ -415,71 +426,85 @@ impl Trie {
                         handle,
                         TrieNodeWithSize::new(TrieNode::Extension(key, child), memory_usage),
                     );
+                    key_deleted = false;
                     break;
                 }
             }
         }
-        self.fix_nodes(memory, path)?;
+        self.fix_nodes(memory, path, key_deleted)?;
         Ok(root_node)
     }

+    /// Iterates over the nodes in `path`. If `key_deleted` is true, the trie
+    /// structure has to change, so node types are adjusted where needed.
+    /// If `key_deleted` is false, this only recomputes memory usage along the path.
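+    ///
+    /// For example: a branch left with a value and no children collapses into
+    /// a leaf, and a branch left with exactly one child and no value is
+    /// squashed into an extension, which may in turn merge with a child leaf
+    /// or extension.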
fn fix_nodes( &self, memory: &mut NodesStorage, path: Vec, + key_deleted: bool, ) -> Result<(), StorageError> { let mut child_memory_usage = 0; for handle in path.into_iter().rev() { let TrieNodeWithSize { node, memory_usage } = memory.destroy(handle); let memory_usage = memory_usage + child_memory_usage; - match node { - TrieNode::Empty => { - memory.store_at(handle, TrieNodeWithSize::empty()); - } - TrieNode::Leaf(key, value) => { - memory.store_at( - handle, - TrieNodeWithSize::new(TrieNode::Leaf(key, value), memory_usage), - ); - } - TrieNode::Branch(mut children, value) => { - for child in children.0.iter_mut() { - if let Some(NodeHandle::InMemory(h)) = child { - if let TrieNode::Empty = memory.node_ref(*h).node { - *child = None + if key_deleted { + match node { + TrieNode::Empty => { + memory.store_at(handle, TrieNodeWithSize::empty()); + } + TrieNode::Leaf(key, value) => { + memory.store_at( + handle, + TrieNodeWithSize::new(TrieNode::Leaf(key, value), memory_usage), + ); + } + TrieNode::Branch(mut children, value) => { + for child in children.0.iter_mut() { + if let Some(NodeHandle::InMemory(h)) = child { + if let TrieNode::Empty = memory.node_ref(*h).node { + *child = None + } } } - } - let num_children = children.iter().count(); - if num_children == 0 { - if let Some(value) = value { - let empty = NibbleSlice::new(&[]).encoded(true).into_vec(); - let leaf_node = TrieNode::Leaf(empty, value); - let memory_usage = leaf_node.memory_usage_direct(memory); - memory.store_at(handle, TrieNodeWithSize::new(leaf_node, memory_usage)); + let num_children = children.iter().count(); + if num_children == 0 { + if let Some(value) = value { + let empty = NibbleSlice::new(&[]).encoded(true).into_vec(); + let leaf_node = TrieNode::Leaf(empty, value); + let memory_usage = leaf_node.memory_usage_direct(memory); + memory.store_at( + handle, + TrieNodeWithSize::new(leaf_node, memory_usage), + ); + } else { + memory.store_at(handle, TrieNodeWithSize::empty()); + } + } else if num_children == 1 && value.is_none() { + // Branch with one child becomes extension + // Extension followed by leaf becomes leaf + // Extension followed by extension becomes extension + let idx = children.iter().next().unwrap().0; + let child = children[idx].take().unwrap(); + let key = NibbleSlice::new(&[(idx << 4) as u8]) + .encoded_leftmost(1, false) + .into_vec(); + self.fix_extension_node(memory, handle, key, child)?; } else { - memory.store_at(handle, TrieNodeWithSize::empty()); + let node = TrieNodeWithSize::new( + TrieNode::Branch(children, value), + memory_usage, + ); + memory.store_at(handle, node); } - } else if num_children == 1 && value.is_none() { - // Branch with one child becomes extension - // Extension followed by leaf becomes leaf - // Extension followed by extension becomes extension - let idx = children.iter().next().unwrap().0; - let child = children[idx].take().unwrap(); - let key = NibbleSlice::new(&[(idx << 4) as u8]) - .encoded_leftmost(1, false) - .into_vec(); + } + TrieNode::Extension(key, child) => { self.fix_extension_node(memory, handle, key, child)?; - } else { - let node = - TrieNodeWithSize::new(TrieNode::Branch(children, value), memory_usage); - memory.store_at(handle, node); } } - TrieNode::Extension(key, child) => { - self.fix_extension_node(memory, handle, key, child)?; - } + } else { + memory.store_at(handle, TrieNodeWithSize { node, memory_usage }); } child_memory_usage = memory.node_ref(handle).memory_usage; } @@ -534,6 +559,7 @@ impl Trie { Ok(()) } + #[tracing::instrument(level = "debug", 
target = "store::trie", "Trie::flatten_nodes", skip_all)] pub(crate) fn flatten_nodes( old_root: &CryptoHash, memory: NodesStorage, @@ -562,16 +588,16 @@ impl Trie { new_children[i - 1] = Some(last_hash); } while i < 16 { - match children[i].clone() { + match &children[i] { Some(NodeHandle::InMemory(handle)) => { stack.push(( node, FlattenNodesCrumb::AtChild(new_children, i + 1), )); - stack.push((handle, FlattenNodesCrumb::Entering)); + stack.push((*handle, FlattenNodesCrumb::Entering)); continue 'outer; } - Some(NodeHandle::Hash(hash)) => new_children[i] = Some(hash), + Some(NodeHandle::Hash(hash)) => new_children[i] = Some(*hash), None => {} } i += 1; diff --git a/core/store/src/trie/iterator.rs b/core/store/src/trie/iterator.rs index 7c0ae5d4ffb..da0d386212a 100644 --- a/core/store/src/trie/iterator.rs +++ b/core/store/src/trie/iterator.rs @@ -299,7 +299,7 @@ impl<'a> TrieIterator<'a> { } // TODO(#9446) remove function when shifting to flat storage iteration for resharding - pub(crate) fn get_trie_items( + pub fn get_trie_items( &mut self, path_begin: &[u8], path_end: &[u8], diff --git a/core/store/src/trie/mem/loading.rs b/core/store/src/trie/mem/loading.rs index a14242f5c3b..9267c3f1028 100644 --- a/core/store/src/trie/mem/loading.rs +++ b/core/store/src/trie/mem/loading.rs @@ -197,7 +197,7 @@ mod tests { use rand::{Rng, SeedableRng}; fn check(keys: Vec>) { - let shard_tries = TestTriesBuilder::new().with_flat_storage().build(); + let shard_tries = TestTriesBuilder::new().with_flat_storage(true).build(); let shard_uid = ShardUId::single_shard(); let changes = keys.iter().map(|key| (key.to_vec(), Some(key.to_vec()))).collect::>(); let changes = simplify_changes(&changes); diff --git a/core/store/src/trie/mem/updating.rs b/core/store/src/trie/mem/updating.rs index 6360c2a3143..2eb3400a5f4 100644 --- a/core/store/src/trie/mem/updating.rs +++ b/core/store/src/trie/mem/updating.rs @@ -1306,6 +1306,7 @@ mod tests { key.push(nibble0 << 4 | nibble1); } } + let mut value_length = rand::thread_rng().gen_range(0..=10); if value_length == 10 { value_length = 8000; // make a long value that is not inlined diff --git a/core/store/src/trie/mod.rs b/core/store/src/trie/mod.rs index f8be9140c09..61960191b89 100644 --- a/core/store/src/trie/mod.rs +++ b/core/store/src/trie/mod.rs @@ -33,7 +33,7 @@ use near_primitives::types::{AccountId, StateRoot, StateRootNode}; use near_vm_runner::ContractCode; pub use raw_node::{Children, RawTrieNode, RawTrieNodeWithSize}; use std::cell::RefCell; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::fmt::Write; use std::hash::Hash; use std::rc::Rc; @@ -80,7 +80,7 @@ pub struct TrieCosts { } /// Whether a key lookup will be performed through flat storage or through iterating the trie -#[derive(PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq)] pub enum KeyLookupMode { FlatStorage, Trie, @@ -449,12 +449,12 @@ impl TrieRefcountSubtraction { /// Helps produce a list of additions and subtractions to the trie, /// especially in the case where deletions don't carry the full value. 
pub struct TrieRefcountDeltaMap { - map: HashMap>, i32)>, + map: BTreeMap>, i32)>, } impl TrieRefcountDeltaMap { pub fn new() -> Self { - Self { map: HashMap::new() } + Self { map: BTreeMap::new() } } pub fn add(&mut self, hash: CryptoHash, data: Vec, refcount: u32) { @@ -469,8 +469,9 @@ impl TrieRefcountDeltaMap { } pub fn into_changes(self) -> (Vec, Vec) { - let mut insertions = Vec::new(); - let mut deletions = Vec::new(); + let num_insertions = self.map.iter().filter(|(_h, (_v, rc))| *rc > 0).count(); + let mut insertions = Vec::with_capacity(num_insertions); + let mut deletions = Vec::with_capacity(self.map.len().saturating_sub(num_insertions)); for (hash, (value, rc)) in self.map.into_iter() { if rc > 0 { insertions.push(TrieRefcountAddition { @@ -666,6 +667,7 @@ impl Trie { self.flat_storage_chunk_view.clone(), ); trie.recorder = Some(RefCell::new(TrieRecorder::new())); + trie.charge_gas_for_trie_node_access = self.charge_gas_for_trie_node_access; trie } @@ -682,6 +684,15 @@ impl Trie { .unwrap_or_default() } + /// Size of the recorded state proof plus some additional size added to cover removals. + /// An upper-bound estimation of the true recorded size after finalization. + pub fn recorded_storage_size_upper_bound(&self) -> usize { + self.recorder + .as_ref() + .map(|recorder| recorder.borrow().recorded_storage_size_upper_bound()) + .unwrap_or_default() + } + /// Constructs a Trie from the partial storage (i.e. state proof) that /// was returned from recorded_storage(). If used to access the same trie /// nodes as when the partial storage was generated, this trie will behave @@ -741,9 +752,16 @@ impl Trie { #[cfg(test)] fn memory_usage_verify(&self, memory: &NodesStorage, handle: NodeHandle) -> u64 { + // Cannot compute memory usage naively if given only partial storage. + if self.storage.as_partial_storage().is_some() { + return 0; + } + // We don't want to impact recorded storage by retrieving nodes for + // this sanity check. if self.recorder.is_some() { return 0; } + let TrieNodeWithSize { node, memory_usage } = match handle { NodeHandle::InMemory(h) => memory.node_ref(h).clone(), NodeHandle::Hash(h) => self.retrieve_node(&h).expect("storage failure").1, @@ -1822,7 +1840,7 @@ mod tests { fn test_contains_key() { let sid = ShardUId::single_shard(); let bid = CryptoHash::default(); - let tries = TestTriesBuilder::new().with_flat_storage().build(); + let tries = TestTriesBuilder::new().with_flat_storage(true).build(); let initial = vec![ (vec![99, 44, 100, 58, 58, 49], Some(vec![1])), (vec![99, 44, 100, 58, 58, 50], Some(vec![1])), @@ -1916,7 +1934,9 @@ mod tests { let trie = tries.get_trie_for_shard(ShardUId::single_shard(), state_root); // Those known keys. - for (key, value) in trie_changes.into_iter().collect::>() { + for (key, value) in + trie_changes.into_iter().collect::>() + { if let Some(value) = value { let want = Some(Ok((key.clone(), value))); let mut iterator = trie.iter().unwrap(); diff --git a/core/store/src/trie/prefetching_trie_storage.rs b/core/store/src/trie/prefetching_trie_storage.rs index 43b8c558ada..8cb4b53264c 100644 --- a/core/store/src/trie/prefetching_trie_storage.rs +++ b/core/store/src/trie/prefetching_trie_storage.rs @@ -1,3 +1,4 @@ +use crate::config::PrefetchConfig; use crate::sync_utils::Monitor; use crate::{ metrics, DBCol, MissingTrieValueContext, StorageError, Store, Trie, TrieCache, @@ -77,11 +78,16 @@ pub struct PrefetchApi { /// multiple times. 
pub(crate) prefetching: PrefetchStagingArea, + store: Store, + shard_cache: TrieCache, + pub enable_receipt_prefetching: bool, /// Configured accounts will be prefetched as SWEAT token account, if predecessor is listed as receiver. pub sweat_prefetch_receivers: Vec, /// List of allowed predecessor accounts for SWEAT prefetching. pub sweat_prefetch_senders: Vec, + pub claim_sweat_prefetch_config: Vec, + pub kaiching_prefetch_config: Vec, pub shard_uid: ShardUId, } @@ -406,7 +412,8 @@ impl PrefetchApi { let sweat_prefetch_receivers = trie_config.sweat_prefetch_receivers.clone(); let sweat_prefetch_senders = trie_config.sweat_prefetch_senders.clone(); let enable_receipt_prefetching = trie_config.enable_receipt_prefetching; - + let claim_sweat_prefetch_config = trie_config.claim_sweat_prefetch_config.clone(); + let kaiching_prefetch_config = trie_config.kaiching_prefetch_config.clone(); let this = Self { work_queue_tx, work_queue_rx, @@ -414,18 +421,15 @@ impl PrefetchApi { enable_receipt_prefetching, sweat_prefetch_receivers, sweat_prefetch_senders, + claim_sweat_prefetch_config, + kaiching_prefetch_config, shard_uid, + store, + shard_cache, }; let (shutdown_tx, shutdown_rx) = crossbeam::channel::bounded(1); let handles = (0..NUM_IO_THREADS) - .map(|_| { - this.start_io_thread( - store.clone(), - shard_cache.clone(), - shard_uid, - shutdown_rx.clone(), - ) - }) + .map(|_| this.start_io_thread(shard_uid, shutdown_rx.clone())) .collect(); let handle = PrefetchingThreadsHandle { shutdown_channel: Some(shutdown_tx), handles }; (this, handle) @@ -442,15 +446,26 @@ impl PrefetchApi { }) } + pub fn make_storage(&self) -> Rc { + Rc::new(TriePrefetchingStorage::new( + self.store.clone(), + self.shard_uid, + self.shard_cache.clone(), + self.prefetching.clone(), + )) + } + pub fn start_io_thread( &self, - store: Store, - shard_cache: TrieCache, shard_uid: ShardUId, shutdown_rx: crossbeam::channel::Receiver<()>, ) -> thread::JoinHandle<()> { - let prefetcher_storage = - TriePrefetchingStorage::new(store, shard_uid, shard_cache, self.prefetching.clone()); + let prefetcher_storage = TriePrefetchingStorage::new( + self.store.clone(), + self.shard_uid, + self.shard_cache.clone(), + self.prefetching.clone(), + ); let work_queue = self.work_queue_rx.clone(); let metric_prefetch_sent = metrics::PREFETCH_SENT.with_label_values(&[&shard_uid.shard_id.to_string()]); @@ -476,13 +491,22 @@ impl PrefetchApi { Trie::new(Rc::new(prefetcher_storage.clone()), trie_root, None); let storage_key = trie_key.to_vec(); metric_prefetch_sent.inc(); - if let Ok(_maybe_value) = prefetcher_trie.get(&storage_key) { - near_o11y::io_trace!(count: "prefetch"); - } else { - // This may happen in rare occasions and can be ignored safely. - // See comments in `TriePrefetchingStorage::retrieve_raw_bytes`. - near_o11y::io_trace!(count: "prefetch_failure"); - metric_prefetch_fail.inc(); + match prefetcher_trie.get(&storage_key) { + Ok(_maybe_value) => { + near_o11y::io_trace!(count: "prefetch"); + } + Err(e) => { + tracing::debug!( + target: "store::trie::prefetch", + message = "prefetching failure", + error = %e, + key = ?trie_key + ); + // This may happen in rare occasions and can be ignored safely. + // See comments in `TriePrefetchingStorage::retrieve_raw_bytes`. + near_o11y::io_trace!(count: "prefetch_failure"); + metric_prefetch_fail.inc(); + } } } } @@ -494,8 +518,12 @@ impl PrefetchApi { /// /// Queued up work will not be finished. But trie keys that are already /// being fetched will finish. 
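     ///
     /// Returns the number of dropped requests (per the change below); a
     /// caller might log it (an illustrative sketch; `prefetch_api` is a
     /// hypothetical handle):
     ///
     /// ```ignore
     /// let dropped = prefetch_api.clear_queue();
     /// tracing::debug!(target: "store::trie::prefetch", dropped, "cleared prefetch queue");
     /// ```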
- pub fn clear_queue(&self) { - while let Ok(_dropped) = self.work_queue_rx.try_recv() {} + pub fn clear_queue(&self) -> usize { + let mut count = 0; + while let Ok(_dropped) = self.work_queue_rx.try_recv() { + count += 1; + } + count } /// Clear prefetched staging area from data that has not been picked up by the main thread. diff --git a/core/store/src/trie/resharding.rs b/core/store/src/trie/resharding.rs index baa46e7a756..0d71dcec6ac 100644 --- a/core/store/src/trie/resharding.rs +++ b/core/store/src/trie/resharding.rs @@ -1,10 +1,13 @@ use crate::flat::FlatStateChanges; -use crate::{get, get_delayed_receipt_indices, set, ShardTries, StoreUpdate, Trie, TrieUpdate}; +use crate::{ + get, get_delayed_receipt_indices, get_promise_yield_indices, set, ShardTries, StoreUpdate, + Trie, TrieUpdate, +}; use borsh::BorshDeserialize; use bytesize::ByteSize; use near_primitives::account::id::AccountId; use near_primitives::errors::StorageError; -use near_primitives::receipt::Receipt; +use near_primitives::receipt::{PromiseYieldTimeout, Receipt}; use near_primitives::shard_layout::ShardUId; use near_primitives::state_part::PartId; use near_primitives::trie_key::trie_key_parsers::parse_account_id_from_raw_key; @@ -39,7 +42,8 @@ impl ShardTries { account_id_to_shard_uid: &dyn Fn(&AccountId) -> ShardUId, ) -> Result, StorageError> { let mut trie_updates: HashMap<_, _> = self.get_trie_updates(state_roots); - let mut insert_receipts = Vec::new(); + let mut inserted_receipts = Vec::new(); + let mut inserted_timeouts = Vec::new(); for ConsolidatedStateChange { trie_key, value } in changes.changes { match &trie_key { TrieKey::DelayedReceiptIndices => {} @@ -51,13 +55,36 @@ impl ShardTries { value, err, )) })?; - insert_receipts.push((*index, receipt)); + // Accumulate insertions so that they can be sorted by index and applied to + // the child tries in the correct order. + inserted_receipts.push((*index, receipt)); + } + None => { + // For DelayedReceipt deletions we cannot infer the account information + // from the state change. Instead, the erased receipts are passed directly + // in `changes.processed_delayed_receipts`. } - None => {} }, TrieKey::PromiseYieldIndices => {} - TrieKey::PromiseYieldTimeout { .. } => todo!(), - TrieKey::PromiseYieldReceipt { .. } => todo!(), + TrieKey::PromiseYieldTimeout { index } => match value { + Some(value) => { + let timeout = + PromiseYieldTimeout::try_from_slice(&value).map_err(|err| { + StorageError::StorageInconsistentState(format!( + "invalid PromiseYield queue entry {:?}, err: {}", + value, err, + )) + })?; + // Accumulate insertions so that they can be sorted by index and applied to + // the child tries in the correct order. + inserted_timeouts.push((*index, timeout)); + } + None => { + // For PromiseYieldTimeout deletions we cannot infer the account information + // from the state change. Instead, the erased timeouts are passed directly + // in `changes.processed_yield_timeouts`. + } + }, TrieKey::Account { account_id } | TrieKey::ContractCode { account_id } | TrieKey::AccessKey { account_id, .. } @@ -65,6 +92,7 @@ impl ShardTries { | TrieKey::PostponedReceiptId { receiver_id: account_id, .. } | TrieKey::PendingDataCount { receiver_id: account_id, .. } | TrieKey::PostponedReceipt { receiver_id: account_id, .. } + | TrieKey::PromiseYieldReceipt { receiver_id: account_id, .. } | TrieKey::ContractData { account_id, .. 
} => { let new_shard_uid = account_id_to_shard_uid(account_id); // we can safely unwrap here because the caller of this function guarantees trie_updates @@ -84,18 +112,26 @@ impl ShardTries { update.commit(StateChangeCause::Resharding); } - insert_receipts.sort_by_key(|it| it.0); - - let insert_receipts: Vec<_> = - insert_receipts.into_iter().map(|(_, receipt)| receipt).collect(); - + inserted_receipts.sort_by_key(|it| it.0); + let inserted_receipts: Vec<_> = + inserted_receipts.into_iter().map(|(_, receipt)| receipt).collect(); apply_delayed_receipts_to_children_states_impl( &mut trie_updates, - &insert_receipts, + &inserted_receipts, &changes.processed_delayed_receipts, account_id_to_shard_uid, )?; + inserted_timeouts.sort_by_key(|it| it.0); + let inserted_timeouts: Vec<_> = + inserted_timeouts.into_iter().map(|(_, timeout)| timeout).collect(); + apply_promise_yield_timeouts_to_children_states_impl( + &mut trie_updates, + &inserted_timeouts, + &changes.processed_yield_timeouts, + account_id_to_shard_uid, + )?; + Ok(trie_updates) } @@ -112,10 +148,10 @@ impl ShardTries { account_id_to_shard_id: &dyn Fn(&AccountId) -> ShardUId, ) -> Result<(StoreUpdate, HashMap), StorageError> { self.add_values_to_children_states_impl(state_roots, values, &|raw_key| { - // Here changes on DelayedReceipts or DelayedReceiptsIndices will be excluded - // This is because we cannot migrate delayed receipts part by part. They have to be - // reconstructed in the new states after all DelayedReceipts are ready in the original - // shard. + // Here changes on DelayedReceipt, DelayedReceiptIndices, PromiseYieldTimeout, and + // PromiseYieldIndices will be excluded. Both the delayed receipts and the yield + // timeouts are organized in queues; they cannot be handled part by part because + // they need to be re-indexed contiguously when migrated to the child shards. if let Some(account_id) = parse_account_id_from_raw_key(raw_key).map_err(|e| { let err = format!("error parsing account id from trie key {:?}: {:?}", raw_key, e); StorageError::StorageInconsistentState(err) @@ -182,6 +218,22 @@ impl ShardTries { self.finalize_and_apply_trie_updates(trie_updates) } + pub fn apply_promise_yield_timeouts_to_children_states( + &self, + state_roots: &HashMap, + timeouts: &[PromiseYieldTimeout], + account_id_to_shard_uid: &dyn Fn(&AccountId) -> ShardUId, + ) -> Result<(StoreUpdate, HashMap), StorageError> { + let mut trie_updates: HashMap<_, _> = self.get_trie_updates(state_roots); + apply_promise_yield_timeouts_to_children_states_impl( + &mut trie_updates, + timeouts, + &[], + account_id_to_shard_uid, + )?; + self.finalize_and_apply_trie_updates(trie_updates) + } + fn finalize_and_apply_trie_updates( &self, updates: HashMap, @@ -279,8 +331,97 @@ fn apply_delayed_receipts_to_children_states_impl( Ok(()) } -/// Retrieve delayed receipts starting with `start_index` until `memory_limit` is hit -/// return None if there is no delayed receipts with index >= start_index +fn apply_promise_yield_timeouts_to_children_states_impl( + trie_updates: &mut HashMap, + insert_timeouts: &[PromiseYieldTimeout], + delete_timeouts: &[PromiseYieldTimeout], + account_id_to_shard_uid: &dyn Fn(&AccountId) -> ShardUId, +) -> Result<(), StorageError> { + // TODO: we can remove this check once yield execution is stabilized. + // For now it prevents populating promise yield indices for the child shards with default + // values if the feature has not been enabled. 
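+    // Illustrative example (editor's note): timeouts occupying parent-queue
+    // indices {5, 6, 7} that map to two children are re-appended with fresh
+    // contiguous indices per child, e.g. {0} for one child and {0, 1} for the
+    // other, advancing each child's `next_available_index` as it goes.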
+ if insert_timeouts.is_empty() && delete_timeouts.is_empty() { + return Ok(()); + } + + let mut promise_yield_indices_by_shard = HashMap::new(); + for (shard_uid, update) in trie_updates.iter() { + promise_yield_indices_by_shard.insert(*shard_uid, get_promise_yield_indices(update)?); + } + + for timeout in insert_timeouts { + let new_shard_uid: ShardUId = account_id_to_shard_uid(&timeout.account_id); + if !trie_updates.contains_key(&new_shard_uid) { + let err = format!( + "Account {} is in new shard {:?} but state_roots only contains {:?}", + timeout.account_id, + new_shard_uid, + trie_updates.keys(), + ); + return Err(StorageError::StorageInconsistentState(err)); + } + // we already checked that new_shard_uid is in trie_updates and + // promise_yield_indices_by_shard so we can safely unwrap here + let promise_yield_indices = promise_yield_indices_by_shard.get_mut(&new_shard_uid).unwrap(); + set( + trie_updates.get_mut(&new_shard_uid).unwrap(), + TrieKey::PromiseYieldTimeout { index: promise_yield_indices.next_available_index }, + timeout, + ); + promise_yield_indices.next_available_index = + promise_yield_indices.next_available_index.checked_add(1).ok_or_else(|| { + StorageError::StorageInconsistentState( + "Next available index for PromiseYield timeout exceeded the integer limit" + .to_string(), + ) + })?; + } + + for timeout in delete_timeouts { + let new_shard_uid: ShardUId = account_id_to_shard_uid(&timeout.account_id); + if !trie_updates.contains_key(&new_shard_uid) { + let err = format!( + "Account {} is in new shard {:?} but state_roots only contains {:?}", + timeout.account_id, + new_shard_uid, + trie_updates.keys(), + ); + return Err(StorageError::StorageInconsistentState(err)); + } + let promise_yield_indices = promise_yield_indices_by_shard.get_mut(&new_shard_uid).unwrap(); + + let trie_update = trie_updates.get_mut(&new_shard_uid).unwrap(); + let trie_key = TrieKey::PromiseYieldTimeout { index: promise_yield_indices.first_index }; + + let stored_timeout = get::(trie_update, &trie_key)? + .expect("removed PromiseYield timeout does not exist in new state"); + // check that the timeout to remove is at the front of the timeout queue + assert_eq!(&stored_timeout, timeout); + trie_update.remove(trie_key); + promise_yield_indices.first_index += 1; + } + + // commit the trie_updates and update state_roots + for (shard_uid, trie_update) in trie_updates { + set( + trie_update, + TrieKey::PromiseYieldIndices, + promise_yield_indices_by_shard.get(shard_uid).unwrap(), + ); + // StateChangeCause should always be Resharding for processing resharding. + // We do not want to commit the state_changes from resharding as they are already handled while + // processing parent shard + trie_update.commit(StateChangeCause::Resharding); + } + Ok(()) +} + +/// Retrieve delayed receipts starting with `start_index` until `memory_limit` is hit. +/// +/// Returns an updated start_index (the first index which was not read in this batch) +/// and a vec of delayed receipts which were read. +/// +/// Returns None if there are no delayed receipts with index >= start_index. pub fn get_delayed_receipts( state_update: &TrieUpdate, start_index: Option, @@ -317,18 +458,67 @@ pub fn get_delayed_receipts( Ok(Some((delayed_receipt_indices.first_index, receipts))) } +/// Retrieve PromiseYield timeouts starting with `start_index` until `memory_limit` is hit. +/// +/// Returns an updated start_index (the first index which was not read in this batch) +/// and a vec of timeouts which were read. 
+/// +/// Returns None if there are no timeouts with index >= start_index. +pub fn get_promise_yield_timeouts( + state_update: &TrieUpdate, + start_index: Option, + memory_limit: ByteSize, +) -> Result)>, StorageError> { + let mut promise_yield_indices = get_promise_yield_indices(state_update)?; + if let Some(start_index) = start_index { + if start_index >= promise_yield_indices.next_available_index { + return Ok(None); + } + promise_yield_indices.first_index = start_index.max(promise_yield_indices.first_index); + } + let mut used_memory = 0; + let mut timeouts = vec![]; + + while used_memory < memory_limit.as_u64() + && promise_yield_indices.first_index < promise_yield_indices.next_available_index + { + let key = TrieKey::PromiseYieldTimeout { index: promise_yield_indices.first_index }; + let data = state_update.get(&key)?.ok_or_else(|| { + StorageError::StorageInconsistentState(format!( + "PromiseYield timeout #{} should be in the state", + promise_yield_indices.first_index + )) + })?; + used_memory += data.len() as u64; + promise_yield_indices.first_index += 1; + + let timeout = PromiseYieldTimeout::try_from_slice(&data).map_err(|_| { + StorageError::StorageInconsistentState("Failed to deserialize".to_string()) + })?; + timeouts.push(timeout); + } + Ok(Some((promise_yield_indices.first_index, timeouts))) +} + #[cfg(test)] mod tests { - use crate::resharding::{apply_delayed_receipts_to_children_states_impl, get_delayed_receipts}; + use crate::resharding::{ + apply_delayed_receipts_to_children_states_impl, + apply_promise_yield_timeouts_to_children_states_impl, get_delayed_receipts, + get_promise_yield_timeouts, + }; use crate::test_utils::{ - gen_changes, gen_receipts, get_all_delayed_receipts, test_populate_trie, TestTriesBuilder, + gen_changes, gen_receipts, gen_timeouts, get_all_delayed_receipts, + get_all_promise_yield_timeouts, test_populate_trie, TestTriesBuilder, }; use crate::{set, ShardTries, ShardUId, Trie}; use near_primitives::account::id::AccountId; use near_primitives::hash::hash; - use near_primitives::receipt::{DelayedReceiptIndices, Receipt}; + use near_primitives::receipt::{ + DelayedReceiptIndices, PromiseYieldIndices, PromiseYieldTimeout, Receipt, + }; use near_primitives::trie_key::TrieKey; use near_primitives::types::{NumShards, StateChangeCause, StateRoot}; use rand::Rng; @@ -434,6 +624,61 @@ mod tests { } } + #[test] + fn test_get_promise_yield_timeouts() { + let mut rng = rand::thread_rng(); + for _ in 0..20 { + let memory_limit = bytesize::ByteSize::b(rng.gen_range(200..1000)); + let all_timeouts = gen_timeouts(&mut rng, 200); + + // push timeouts to trie + let tries = TestTriesBuilder::new().build(); + let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), Trie::EMPTY_ROOT); + let mut promise_yield_indices = PromiseYieldIndices::default(); + + for (i, timeout) in all_timeouts.iter().enumerate() { + set(&mut trie_update, TrieKey::PromiseYieldTimeout { index: i as u64 }, timeout); + } + promise_yield_indices.next_available_index = all_timeouts.len() as u64; + set(&mut trie_update, TrieKey::PromiseYieldIndices, &promise_yield_indices); + trie_update.commit(StateChangeCause::Resharding); + let (_, trie_changes, _) = trie_update.finalize().unwrap(); + let mut store_update = tries.store_update(); + let state_root = + tries.apply_all(&trie_changes, ShardUId::single_shard(), &mut store_update); + store_update.commit().unwrap(); + + assert_eq!( + all_timeouts, + get_all_promise_yield_timeouts(&tries, &ShardUId::single_shard(), &state_root) + ); + let mut 
start_index = 0; + + let trie_update = tries.new_trie_update(ShardUId::single_shard(), state_root); + while let Some((next_index, timeouts)) = + get_promise_yield_timeouts(&trie_update, Some(start_index), memory_limit).unwrap() + { + assert_eq!(timeouts, all_timeouts[start_index as usize..next_index as usize]); + start_index = next_index; + + let total_memory_use: u64 = timeouts + .iter() + .map(|timeout| borsh::object_length(&timeout).unwrap() as u64) + .sum(); + let memory_use_without_last_timeout: u64 = timeouts[..timeouts.len() - 1] + .iter() + .map(|timeout| borsh::object_length(&timeout).unwrap() as u64) + .sum(); + + assert!( + total_memory_use >= memory_limit.as_u64() + || next_index == all_timeouts.len() as u64 + ); + assert!(memory_use_without_last_timeout < memory_limit.as_u64()); + } + } + } + fn test_apply_delayed_receipts( tries: &ShardTries, new_receipts: &[Receipt], @@ -507,4 +752,78 @@ mod tests { } } } + + fn test_apply_promise_yield_timeouts( + tries: &ShardTries, + new_timeouts: &[PromiseYieldTimeout], + delete_timeouts: &[PromiseYieldTimeout], + expected_all_timeouts: &[PromiseYieldTimeout], + state_roots: HashMap, + account_id_to_shard_id: &dyn Fn(&AccountId) -> ShardUId, + ) -> HashMap { + let mut trie_updates: HashMap<_, _> = tries.get_trie_updates(&state_roots); + apply_promise_yield_timeouts_to_children_states_impl( + &mut trie_updates, + new_timeouts, + delete_timeouts, + account_id_to_shard_id, + ) + .unwrap(); + let (state_update, new_state_roots) = + tries.finalize_and_apply_trie_updates(trie_updates).unwrap(); + state_update.commit().unwrap(); + + let timeouts_by_shard: HashMap<_, _> = new_state_roots + .iter() + .map(|(shard_uid, state_root)| { + let timeouts = get_all_promise_yield_timeouts(tries, shard_uid, state_root); + (shard_uid, timeouts) + }) + .collect(); + + let mut expected_timeouts_by_shard: HashMap<_, _> = + state_roots.iter().map(|(shard_uid, _)| (shard_uid, vec![])).collect(); + for timeout in expected_all_timeouts { + let shard_uid = account_id_to_shard_id(&timeout.account_id); + expected_timeouts_by_shard.get_mut(&shard_uid).unwrap().push(timeout.clone()); + } + assert_eq!(expected_timeouts_by_shard, timeouts_by_shard); + + new_state_roots + } + + #[test] + fn test_apply_promise_yield_timeouts_to_new_states() { + let mut rng = rand::thread_rng(); + + let tries = TestTriesBuilder::new().build(); + let num_shards = 4; + + for _ in 0..10 { + let mut state_roots: HashMap<_, _> = (0..num_shards) + .map(|x| (ShardUId { version: 1, shard_id: x as u32 }, Trie::EMPTY_ROOT)) + .collect(); + let mut all_timeouts = vec![]; + let mut start_index = 0; + for _ in 0..10 { + let timeouts = gen_timeouts(&mut rng, 100); + let new_start_index = rng.gen_range(start_index..all_timeouts.len() + 1); + + all_timeouts.extend_from_slice(&timeouts); + state_roots = test_apply_promise_yield_timeouts( + &tries, + &timeouts, + &all_timeouts[start_index..new_start_index], + &all_timeouts[new_start_index..], + state_roots, + &|account_id| ShardUId { + shard_id: (hash(account_id.as_bytes()).0[0] as NumShards % num_shards) + as u32, + version: 1, + }, + ); + start_index = new_start_index; + } + } + } } diff --git a/core/store/src/trie/shard_tries.rs b/core/store/src/trie/shard_tries.rs index 6c3c35ca482..39e87a5261d 100644 --- a/core/store/src/trie/shard_tries.rs +++ b/core/store/src/trie/shard_tries.rs @@ -21,7 +21,7 @@ use near_primitives::types::{ use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; use std::collections::HashMap; use std::rc::Rc; -use 
std::sync::{Arc, RwLock}; +use std::sync::{Arc, Mutex, RwLock}; use tracing::info; struct ShardTriesInner { @@ -29,9 +29,9 @@ struct ShardTriesInner { trie_config: TrieConfig, mem_tries: RwLock>>>, /// Cache reserved for client actor to use - caches: RwLock>, + caches: Mutex>, /// Cache for readers. - view_caches: RwLock>, + view_caches: Mutex>, flat_storage_manager: FlatStorageManager, /// Prefetcher state, such as IO threads, per shard. prefetchers: RwLock>, @@ -62,8 +62,8 @@ impl ShardTries { store, trie_config, mem_tries: RwLock::new(HashMap::new()), - caches: RwLock::new(caches), - view_caches: RwLock::new(view_caches), + caches: Mutex::new(caches), + view_caches: Mutex::new(view_caches), flat_storage_manager, prefetchers: Default::default(), state_snapshot: Arc::new(RwLock::new(None)), @@ -91,6 +91,22 @@ impl ShardTries { TrieUpdate::new(self.get_view_trie_for_shard(shard_uid, state_root)) } + #[tracing::instrument( + level = "trace", + target = "store::trie::shard_tries", + "ShardTries::get_trie_cache_for", + skip_all, + fields(is_view) + )] + fn get_trie_cache_for(&self, shard_uid: ShardUId, is_view: bool) -> TrieCache { + let caches_to_use = if is_view { &self.0.view_caches } else { &self.0.caches }; + let mut caches = caches_to_use.lock().expect(POISONED_LOCK_ERR); + caches + .entry(shard_uid) + .or_insert_with(|| TrieCache::new(&self.0.trie_config, shard_uid, is_view)) + .clone() + } + fn get_trie_for_shard_internal( &self, shard_uid: ShardUId, @@ -98,23 +114,13 @@ impl ShardTries { is_view: bool, block_hash: Option, ) -> Trie { - let caches_to_use = if is_view { &self.0.view_caches } else { &self.0.caches }; - let cache = { - let mut caches = caches_to_use.write().expect(POISONED_LOCK_ERR); - caches - .entry(shard_uid) - .or_insert_with(|| TrieCache::new(&self.0.trie_config, shard_uid, is_view)) - .clone() - }; + let cache = self.get_trie_cache_for(shard_uid, is_view); // Do not enable prefetching on view caches. // 1) Performance of view calls is not crucial. // 2) A lot of the prefetcher code assumes there is only one "main-thread" per shard active. // If you want to enable it for view calls, at least make sure they don't share // the `PrefetchApi` instances with the normal calls. 
- let prefetch_enabled = !is_view - && (self.0.trie_config.enable_receipt_prefetching - || (!self.0.trie_config.sweat_prefetch_receivers.is_empty() - && !self.0.trie_config.sweat_prefetch_senders.is_empty())); + let prefetch_enabled = !is_view && self.0.trie_config.prefetch_enabled(); let prefetch_api = prefetch_enabled.then(|| { self.0 .prefetchers @@ -161,13 +167,7 @@ impl ShardTries { block_hash: &CryptoHash, ) -> Result { let (store, flat_storage_manager) = self.get_state_snapshot(block_hash)?; - let cache = { - let mut caches = self.0.view_caches.write().expect(POISONED_LOCK_ERR); - caches - .entry(shard_uid) - .or_insert_with(|| TrieCache::new(&self.0.trie_config, shard_uid, true)) - .clone() - }; + let cache = self.get_trie_cache_for(shard_uid, true); let storage = Rc::new(TrieCachingStorage::new(store, cache, shard_uid, true, None)); let flat_storage_chunk_view = flat_storage_manager.chunk_view(shard_uid, *block_hash); @@ -208,20 +208,19 @@ impl ShardTries { &self.0.state_snapshot_config } - pub fn trie_config(&self) -> &TrieConfig { - &self.0.trie_config - } - pub(crate) fn state_snapshot(&self) -> &Arc>> { &self.0.state_snapshot } + #[tracing::instrument( + level = "trace", + target = "store::trie::shard_tries", + "ShardTries::update_cache", + skip_all, + fields(ops.len = ops.len()), + )] pub fn update_cache(&self, ops: Vec<(&CryptoHash, Option<&[u8]>)>, shard_uid: ShardUId) { - let mut caches = self.0.caches.write().expect(POISONED_LOCK_ERR); - let cache = caches - .entry(shard_uid) - .or_insert_with(|| TrieCache::new(&self.0.trie_config, shard_uid, false)) - .clone(); + let cache = self.get_trie_cache_for(shard_uid, false); cache.update_cache(ops); } @@ -231,7 +230,7 @@ impl ShardTries { shard_uid: ShardUId, store_update: &mut StoreUpdate, ) { - let mut ops = Vec::new(); + let mut ops = Vec::with_capacity(deletions.len()); for TrieRefcountSubtraction { trie_node_or_value_hash, rc, .. 
} in deletions.iter() { let key = TrieCachingStorage::get_key_from_shard_uid_and_hash( shard_uid, @@ -250,7 +249,7 @@ impl ShardTries { shard_uid: ShardUId, store_update: &mut StoreUpdate, ) { - let mut ops = Vec::new(); + let mut ops = Vec::with_capacity(insertions.len()); for TrieRefcountAddition { trie_node_or_value_hash, trie_node_or_value, rc } in insertions.iter() { @@ -278,6 +277,13 @@ impl ShardTries { trie_changes.new_root } + #[tracing::instrument( + level = "trace", + target = "store::trie::shard_tries", + "ShardTries::apply_insertions", + fields(num_insertions = trie_changes.insertions().len(), shard_id = shard_uid.shard_id()), + skip_all, + )] pub fn apply_insertions( &self, trie_changes: &TrieChanges, @@ -294,6 +300,13 @@ impl ShardTries { self.apply_insertions_inner(&trie_changes.insertions, shard_uid, store_update) } + #[tracing::instrument( + level = "trace", + target = "store::trie::shard_tries", + "ShardTries::apply_deletions", + fields(num_deletions = trie_changes.deletions().len(), shard_id = shard_uid.shard_id()), + skip_all, + )] pub fn apply_deletions( &self, trie_changes: &TrieChanges, @@ -378,12 +391,13 @@ impl ShardTries { /// Note that flat storage needs to be handled separately pub fn delete_trie_for_shard(&self, shard_uid: ShardUId, store_update: &mut StoreUpdate) { // Clear both caches and remove state values from store - self.0.caches.write().expect(POISONED_LOCK_ERR).remove(&shard_uid); - self.0.view_caches.write().expect(POISONED_LOCK_ERR).remove(&shard_uid); + let _cache = self.0.caches.lock().expect(POISONED_LOCK_ERR).remove(&shard_uid); + let _view_cache = self.0.view_caches.lock().expect(POISONED_LOCK_ERR).remove(&shard_uid); remove_all_state_values(store_update, shard_uid); } - /// Remove trie from memory for shards not included in the given list. + /// Retains in-memory tries for given shards, i.e. unload tries from memory for shards that are NOT + /// in the given list. Should be called to unload obsolete tries from memory. pub fn retain_mem_tries(&self, shard_uids: &[ShardUId]) { info!(target: "memtrie", "Current memtries: {:?}. Keeping memtries for shards {:?}...", self.0.mem_tries.read().unwrap().keys(), shard_uids); @@ -411,12 +425,25 @@ impl ShardTries { Ok(()) } - /// Returns whether mem-trie is loaded for the given shard. - pub fn is_mem_trie_loaded(&self, shard_uid: &ShardUId) -> bool { - self.0.mem_tries.read().unwrap().contains_key(shard_uid) + /// Loads in-memory trie upon catchup, if it is enabled. + /// Requires state root because `ChunkExtra` is not available at the time mem-trie is being loaded. + pub fn load_mem_trie_on_catchup( + &self, + shard_uid: &ShardUId, + state_root: &StateRoot, + ) -> Result<(), StorageError> { + if !self.0.trie_config.load_mem_tries_for_tracked_shards { + return Ok(()); + } + // It should not happen that memtrie is already loaded for a shard + // for which we just did state sync. + debug_assert!(!self.0.mem_tries.read().unwrap().contains_key(shard_uid)); + self.load_mem_trie(shard_uid, Some(*state_root)) } - /// Should be called upon startup to load in-memory tries for enabled shards. + /// Loads in-memory tries upon startup. The given shard_uids are possible candidates to load, + /// but which exact shards to load depends on configuration. This may only be called when flat + /// storage is ready. pub fn load_mem_tries_for_enabled_shards( &self, tracked_shards: &[ShardUId], @@ -445,7 +472,7 @@ impl ShardTries { /// Retrieves the in-memory tries for the shard. 
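 /// Only needs a read lock: this is a pure lookup and never loads or unloads
 /// a trie, so concurrent readers do not block each other.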
pub fn get_mem_tries(&self, shard_uid: ShardUId) -> Option>> { - let guard = self.0.mem_tries.write().unwrap(); + let guard = self.0.mem_tries.read().unwrap(); guard.get(&shard_uid).cloned() } @@ -522,6 +549,13 @@ impl WrappedTrieChanges { /// Save state changes into Store. /// /// NOTE: the changes are drained from `self`. + #[tracing::instrument( + level = "debug", + target = "store::trie::shard_tries", + "ShardTries::state_changes_into", + fields(num_state_changes = self.state_changes.len(), shard_id = self.shard_uid.shard_id()), + skip_all, + )] pub fn state_changes_into(&mut self, store_update: &mut StoreUpdate) { for mut change_with_trie_key in self.state_changes.drain(..) { assert!( @@ -541,40 +575,18 @@ impl WrappedTrieChanges { continue; } - let storage_key = if cfg!(feature = "serialize_all_state_changes") { - // Serialize all kinds of state changes without any filtering. - // Without this it's not possible to replay state changes to get an identical state root. - - // This branch will become the default in the near future. - - match change_with_trie_key.trie_key.get_account_id() { - // If a TrieKey itself doesn't identify the Shard, then we need to add shard id to the row key. - None => KeyForStateChanges::delayed_receipt_key_from_trie_key( - &self.block_hash, - &change_with_trie_key.trie_key, - &self.shard_uid, - ), - // TrieKey has enough information to identify the shard it comes from. - _ => KeyForStateChanges::from_trie_key( - &self.block_hash, - &change_with_trie_key.trie_key, - ), - } - } else { - // This branch is the current neard behavior. - // Only a subset of state changes get serialized. - - // Filtering trie keys for user facing RPC reporting. - // NOTE: If the trie key is not one of the account specific, it may cause key conflict - // when the node tracks multiple shards. See #2563. - match &change_with_trie_key.trie_key { - TrieKey::Account { .. } - | TrieKey::ContractCode { .. } - | TrieKey::AccessKey { .. } - | TrieKey::ContractData { .. } => {} - _ => continue, - }; - KeyForStateChanges::from_trie_key(&self.block_hash, &change_with_trie_key.trie_key) + let storage_key = match change_with_trie_key.trie_key.get_account_id() { + // If a TrieKey itself doesn't identify the Shard, then we need to add shard id to the row key. + None => KeyForStateChanges::delayed_receipt_key_from_trie_key( + &self.block_hash, + &change_with_trie_key.trie_key, + &self.shard_uid, + ), + // TrieKey has enough information to identify the shard it comes from. 
+ _ => KeyForStateChanges::from_trie_key( + &self.block_hash, + &change_with_trie_key.trie_key, + ), }; store_update.set( @@ -585,6 +597,12 @@ impl WrappedTrieChanges { } } + #[tracing::instrument( + level = "debug", + target = "store::trie::shard_tries", + "ShardTries::trie_changes_into", + skip_all + )] pub fn trie_changes_into(&mut self, store_update: &mut StoreUpdate) -> std::io::Result<()> { store_update.set_ser( DBCol::TrieChanges, @@ -727,11 +745,7 @@ mod test { let trie_config = TrieConfig { shard_cache_config: trie_cache_config.clone(), view_shard_cache_config: trie_cache_config, - enable_receipt_prefetching: false, - sweat_prefetch_receivers: Vec::new(), - sweat_prefetch_senders: Vec::new(), - load_mem_tries_for_shards: Vec::new(), - load_mem_tries_for_tracked_shards: false, + ..TrieConfig::default() }; let shard_uids = Vec::from([ShardUId::single_shard()]); ShardTries::new( @@ -814,26 +828,26 @@ mod test { let tries = create_trie(); let trie_caches = &tries.0.caches; // Assert only one cache for one shard exists - assert_eq!(trie_caches.read().unwrap().len(), 1); + assert_eq!(trie_caches.lock().unwrap().len(), 1); // Assert the shard uid is correct - assert!(trie_caches.read().unwrap().get(&shard_uid).is_some()); + assert!(trie_caches.lock().unwrap().get(&shard_uid).is_some()); // Read from cache let key = CryptoHash::hash_borsh("alice"); let val: Vec = Vec::from([0, 1, 2, 3, 4]); - assert!(trie_caches.read().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); + assert!(trie_caches.lock().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); let insert_ops = Vec::from([(&key, Some(val.as_slice()))]); tries.update_cache(insert_ops, shard_uid); assert_eq!( - trie_caches.read().unwrap().get(&shard_uid).unwrap().get(&key).unwrap().to_vec(), + trie_caches.lock().unwrap().get(&shard_uid).unwrap().get(&key).unwrap().to_vec(), val ); let deletions_ops = Vec::from([(&key, None)]); tries.update_cache(deletions_ops, shard_uid); - assert!(trie_caches.read().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); + assert!(trie_caches.lock().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); } #[test] @@ -847,11 +861,7 @@ mod test { let trie_config = TrieConfig { shard_cache_config: trie_cache_config.clone(), view_shard_cache_config: trie_cache_config, - enable_receipt_prefetching: false, - sweat_prefetch_receivers: Vec::new(), - sweat_prefetch_senders: Vec::new(), - load_mem_tries_for_shards: Vec::new(), - load_mem_tries_for_tracked_shards: false, + ..TrieConfig::default() }; let shard_uids = Vec::from([ShardUId { shard_id: 0, version: 0 }]); let shard_uid = *shard_uids.first().unwrap(); @@ -872,7 +882,7 @@ mod test { let insert_ops = Vec::from([(&key, Some(val.as_slice()))]); trie.update_cache(insert_ops, shard_uid); assert_eq!( - trie_caches.read().unwrap().get(&shard_uid).unwrap().get(&key).unwrap().to_vec(), + trie_caches.lock().unwrap().get(&shard_uid).unwrap().get(&key).unwrap().to_vec(), val ); @@ -881,7 +891,7 @@ mod test { let val: Vec = vec![0; TrieConfig::max_cached_value_size()]; let insert_ops = Vec::from([(&key, Some(val.as_slice()))]); trie.update_cache(insert_ops, shard_uid); - assert!(trie_caches.read().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); + assert!(trie_caches.lock().unwrap().get(&shard_uid).unwrap().get(&key).is_none()); } #[test] @@ -905,8 +915,8 @@ mod test { store_update.commit().unwrap(); // verify if data and caches are deleted - assert!(tries.0.caches.read().unwrap().get(&shard_uid).is_none()); - 
assert!(tries.0.view_caches.read().unwrap().get(&shard_uid).is_none()); + assert!(tries.0.caches.lock().unwrap().get(&shard_uid).is_none()); + assert!(tries.0.view_caches.lock().unwrap().get(&shard_uid).is_none()); let store = tries.get_store(); let key_prefix = shard_uid.to_bytes(); let mut iter = store.iter_prefix(DBCol::State, &key_prefix); diff --git a/core/store/src/trie/state_parts.rs b/core/store/src/trie/state_parts.rs index f2482d35511..fecf2b6e03f 100644 --- a/core/store/src/trie/state_parts.rs +++ b/core/store/src/trie/state_parts.rs @@ -1147,7 +1147,7 @@ mod tests { fn get_trie_nodes_for_part_with_flat_storage() { let value_len = 1000usize; - let tries = TestTriesBuilder::new().with_flat_storage().build(); + let tries = TestTriesBuilder::new().with_flat_storage(true).build(); let shard_uid = ShardUId::single_shard(); let block_hash = CryptoHash::default(); let part_id = PartId::new(1, 3); diff --git a/core/store/src/trie/trie_recording.rs b/core/store/src/trie/trie_recording.rs index cc411db5b3f..d44260c8238 100644 --- a/core/store/src/trie/trie_recording.rs +++ b/core/store/src/trie/trie_recording.rs @@ -8,11 +8,14 @@ use std::sync::Arc; pub struct TrieRecorder { recorded: HashMap>, size: usize, + /// Counts removals performed while recording. + /// recorded_storage_size_upper_bound takes it into account when calculating the total size. + removal_counter: usize, } impl TrieRecorder { pub fn new() -> Self { - Self { recorded: HashMap::new(), size: 0 } + Self { recorded: HashMap::new(), size: 0, removal_counter: 0 } } pub fn record(&mut self, hash: &CryptoHash, node: Arc<[u8]>) { @@ -22,6 +25,10 @@ impl TrieRecorder { } } + pub fn record_removal(&mut self) { + self.removal_counter = self.removal_counter.saturating_add(1) + } + pub fn recorded_storage(&mut self) -> PartialStorage { let mut nodes: Vec<_> = self.recorded.drain().map(|(_key, value)| value).collect(); nodes.sort(); @@ -32,6 +39,15 @@ impl TrieRecorder { debug_assert!(self.size == self.recorded.values().map(|v| v.len()).sum::()); self.size } + + /// Size of the recorded state proof plus some additional size added to cover removals. + /// An upper-bound estimation of the true recorded size after finalization. + /// See https://github.com/near/nearcore/issues/10890 and https://github.com/near/nearcore/pull/11000 for details. + pub fn recorded_storage_size_upper_bound(&self) -> usize { + // Charge 2000 bytes for every removal + let removals_size = self.removal_counter.saturating_mul(2000); + self.recorded_storage_size().saturating_add(removals_size) + } } #[cfg(test)] @@ -43,18 +59,20 @@ mod trie_recording_tests { }; use crate::trie::mem::metrics::MEM_TRIE_NUM_LOOKUPS; use crate::trie::TrieNodesCount; - use crate::{DBCol, Store, Trie}; + use crate::{DBCol, KeyLookupMode, PartialStorage, ShardTries, Store, Trie}; use borsh::BorshDeserialize; + use near_primitives::challenge::PartialState; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::shard_layout::{get_block_shard_uid, ShardUId}; use near_primitives::state::ValueRef; use near_primitives::types::chunk_extra::ChunkExtra; use near_primitives::types::StateRoot; + use rand::prelude::SliceRandom; use rand::{random, thread_rng, Rng}; use std::collections::{HashMap, HashSet}; use std::num::NonZeroU32; - const NUM_ITERATIONS_PER_TEST: usize = 100; + const NUM_ITERATIONS_PER_TEST: usize = 300; /// Prepared on-disk trie and flat storage for testing. struct PreparedTrie { @@ -76,14 +94,18 @@ mod trie_recording_tests { /// storage with some dummy block info. 
If `use_missing_keys` is true, /// the keys to test with will also include some keys that are not in the /// trie. - fn prepare_trie(use_missing_keys: bool) -> PreparedTrie { - let tries_for_building = TestTriesBuilder::new().with_flat_storage().build(); + fn prepare_trie( + use_missing_keys: bool, + p_existing_key: f64, + p_missing_key: f64, + ) -> PreparedTrie { + let tries_for_building = TestTriesBuilder::new().with_flat_storage(true).build(); let shard_uid = ShardUId::single_shard(); let trie_changes = gen_larger_changes(&mut thread_rng(), 50); let trie_changes = simplify_changes(&trie_changes); if trie_changes.is_empty() { // try again - return prepare_trie(use_missing_keys); + return prepare_trie(use_missing_keys, p_existing_key, p_missing_key); } let state_root = test_populate_trie( &tries_for_building, @@ -115,19 +137,32 @@ mod trie_recording_tests { .iter() .map(|(key, value)| (key.clone(), value.clone().unwrap())) .collect::>(); - let (keys_to_get, keys_to_get_ref) = trie_changes + let existing_keys: HashSet<_> = trie_changes + .into_iter() + .map(|(key, _)| key) + .filter(|_| thread_rng().gen_bool(p_existing_key)) + .collect(); + let missing_keys = if use_missing_keys { + existing_keys + .iter() + .cloned() + .map(|mut key| { + *key.last_mut().unwrap() = 100; + key + }) + .filter(|key| !existing_keys.contains(key) && thread_rng().gen_bool(p_missing_key)) + .collect::>() + .into_iter() + .collect::>() + } else { + vec![] + }; + let mut keys: Vec<_> = + existing_keys.iter().cloned().chain(missing_keys.into_iter()).collect(); + keys.shuffle(&mut thread_rng()); + let updates = keys .iter() - .map(|(key, _)| { - let mut key = key.clone(); - if use_missing_keys { - key.push(100); - } - key - }) - .partition::, _>(|_| random()); - let updates = trie_changes - .iter() - .map(|(key, _)| { + .map(|key| { let value = if thread_rng().gen_bool(0.5) { Some(vec![thread_rng().gen_range(0..10) as u8]) } else { @@ -137,6 +172,8 @@ mod trie_recording_tests { }) .filter(|_| random()) .collect::>(); + let (keys_to_get, keys_to_get_ref) = + keys.into_iter().filter(|_| random()).partition::, _>(|_| random()); PreparedTrie { store: tries_for_building.get_store(), shard_uid, @@ -175,15 +212,48 @@ mod trie_recording_tests { update.commit().unwrap(); } + fn get_trie_for_shard( + tries: &ShardTries, + shard_uid: ShardUId, + state_root: StateRoot, + use_flat_storage: bool, + ) -> Trie { + if use_flat_storage { + tries.get_trie_with_block_hash_for_shard( + shard_uid, + state_root, + &CryptoHash::default(), + false, + ) + } else { + tries.get_trie_for_shard(shard_uid, state_root) + } + } + + /// Assert equality of partial storages with human-readable output. 
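+ /// Compares the two node sets in both directions, so a failure names exactly
+ /// which values are missing from which storage.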
+ fn assert_partial_storage(storage: &PartialStorage, other_storage: &PartialStorage) { + let PartialState::TrieValues(nodes) = &storage.nodes; + let PartialState::TrieValues(other_nodes) = &other_storage.nodes; + let nodes: HashSet> = HashSet::from_iter(nodes.into_iter().map(|key| key.to_vec())); + let other_nodes: HashSet> = + HashSet::from_iter(other_nodes.into_iter().map(|key| key.to_vec())); + let d: Vec<&Vec> = other_nodes.difference(&nodes).collect(); + assert_eq!(d, Vec::<&Vec>::default(), "Missing nodes in first storage"); + let d: Vec<&Vec> = nodes.difference(&other_nodes).collect(); + assert_eq!(d, Vec::<&Vec>::default(), "Missing nodes in second storage"); + } + /// Verifies that when operating on a trie, the results are completely consistent /// regardless of whether we're operating on the real storage (with or without chunk /// cache), while recording reads, or when operating on recorded partial storage. fn test_trie_recording_consistency( enable_accounting_cache: bool, use_missing_keys: bool, - use_in_memory_tries: bool, + use_flat_storage: bool, ) { for _ in 0..NUM_ITERATIONS_PER_TEST { + let p_existing_key = thread_rng().gen_range(0.3..1.0); + let p_missing_key = thread_rng().gen_range(0.7..1.0); let PreparedTrie { store, shard_uid, @@ -192,30 +262,39 @@ mod trie_recording_tests { keys_to_get_ref, updates, state_root, - } = prepare_trie(use_missing_keys); - let tries = if use_in_memory_tries { - TestTriesBuilder::new().with_store(store.clone()).with_in_memory_tries().build() - } else { - TestTriesBuilder::new().with_store(store.clone()).build() - }; + } = prepare_trie(use_missing_keys, p_existing_key, p_missing_key); + let tries = TestTriesBuilder::new() + .with_store(store.clone()) + .with_flat_storage(use_flat_storage) + .build(); + let lookup_mode = + if use_flat_storage { KeyLookupMode::FlatStorage } else { KeyLookupMode::Trie }; let mem_trie_lookup_counts_before = MEM_TRIE_NUM_LOOKUPS.get(); - if use_in_memory_tries { - // Delete the on-disk state so that we really know we're using - // in-memory tries. - destructively_delete_in_memory_state_from_disk(&store, &data_in_trie); + // Check that while using flat storage counters are all zero. + // Only use get_optimized_ref(), because get() will actually + // dereference values which can cause trie reads. + if use_flat_storage { + let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage); + for key in data_in_trie.keys() { + trie.get_optimized_ref(key, lookup_mode).unwrap(); + } + assert_eq!( + trie.get_trie_nodes_count(), + TrieNodesCount { db_reads: 0, mem_reads: 0 } + ); } // Let's capture the baseline node counts - this is what will happen // in production. - let trie = tries.get_trie_for_shard(shard_uid, state_root); + let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } for key in &keys_to_get_ref { assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + trie.get_optimized_ref(key, lookup_mode) .unwrap() .map(|value| value.into_value_ref()), data_in_trie.get(key).map(|value| ValueRef::new(&value)) @@ -227,14 +306,15 @@ mod trie_recording_tests { // Now let's do this again while recording, and make sure that the counters // we get are exactly the same. 
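 // Recording only captures the nodes that are accessed; it must not change
 // how many nodes are read, which is what the assertions below verify.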
- let trie = tries.get_trie_for_shard(shard_uid, state_root).recording_reads(); + let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage) + .recording_reads(); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } for key in &keys_to_get_ref { assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + trie.get_optimized_ref(key, lookup_mode) .unwrap() .map(|value| value.into_value_ref()), data_in_trie.get(key).map(|value| ValueRef::new(&value)) @@ -242,23 +322,24 @@ mod trie_recording_tests { } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); trie.update(updates.iter().cloned()).unwrap(); - - // Now, let's check that when doing the same lookups with the captured partial storage, - // we still get the same counters. - let partial_storage = trie.recorded_storage().unwrap(); - println!( - "Partial storage has {} nodes from {} entries", - partial_storage.nodes.len(), - data_in_trie.len() - ); - let trie = Trie::from_recorded_storage(partial_storage, state_root, false); + let baseline_partial_storage = trie.recorded_storage().unwrap(); + + // Now let's do this again with memtries enabled. Check that counters + // are the same. + assert_eq!(MEM_TRIE_NUM_LOOKUPS.get(), mem_trie_lookup_counts_before); + tries.load_mem_trie(&shard_uid, None).unwrap(); + // Delete the on-disk state so that we really know we're using + // in-memory tries. + destructively_delete_in_memory_state_from_disk(&store, &data_in_trie); + let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage) + .recording_reads(); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } for key in &keys_to_get_ref { assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + trie.get_optimized_ref(key, lookup_mode) .unwrap() .map(|value| value.into_value_ref()), data_in_trie.get(key).map(|value| ValueRef::new(&value)) @@ -267,143 +348,24 @@ mod trie_recording_tests { assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); trie.update(updates.iter().cloned()).unwrap(); - if use_in_memory_tries { - // sanity check that we did indeed use in-memory tries. 
- assert!(MEM_TRIE_NUM_LOOKUPS.get() > mem_trie_lookup_counts_before); - } - } - } - - #[test] - fn test_trie_recording_consistency_no_accounting_cache() { - test_trie_recording_consistency(false, false, false); - } - - #[test] - fn test_trie_recording_consistency_with_accounting_cache() { - test_trie_recording_consistency(true, false, false); - } - - #[test] - fn test_trie_recording_consistency_no_accounting_cache_with_missing_keys() { - test_trie_recording_consistency(false, true, false); - } - - #[test] - fn test_trie_recording_consistency_with_accounting_cache_and_missing_keys() { - test_trie_recording_consistency(true, true, false); - } - - #[test] - fn test_trie_recording_consistency_memtrie_no_accounting_cache() { - test_trie_recording_consistency(false, false, true); - } - - #[test] - fn test_trie_recording_consistency_memtrie_with_accounting_cache() { - test_trie_recording_consistency(true, false, true); - } - - #[test] - fn test_trie_recording_consistency_memtrie_no_accounting_cache_with_missing_keys() { - test_trie_recording_consistency(false, true, true); - } - - #[test] - fn test_trie_recording_consistency_memtrie_with_accounting_cache_and_missing_keys() { - test_trie_recording_consistency(true, true, true); - } - - /// Verifies that when operating on a trie, the results are completely consistent - /// regardless of whether we're operating on the real storage (with or without chunk - /// cache), while recording reads, or when operating on recorded partial storage. - /// This test additionally verifies this when flat storage is used. - fn test_trie_recording_consistency_with_flat_storage( - enable_accounting_cache: bool, - use_missing_keys: bool, - use_in_memory_tries: bool, - ) { - for _ in 0..NUM_ITERATIONS_PER_TEST { - let PreparedTrie { - store, - shard_uid, - data_in_trie, - keys_to_get, - keys_to_get_ref, - updates, - state_root, - } = prepare_trie(use_missing_keys); - let tries = if use_in_memory_tries { - TestTriesBuilder::new() - .with_store(store.clone()) - .with_flat_storage() - .with_in_memory_tries() - .build() - } else { - TestTriesBuilder::new().with_store(store.clone()).with_flat_storage().build() - }; - let mem_trie_lookup_counts_before = MEM_TRIE_NUM_LOOKUPS.get(); - - if use_in_memory_tries { - // Delete the on-disk state so that we really know we're using - // in-memory tries. - destructively_delete_in_memory_state_from_disk(&store, &data_in_trie); - } - // Check that the trie is using flat storage, so that counters are all zero. - // Only use get_optimized_ref(), because get() will actually dereference values which can - // cause trie reads. - let trie = tries.get_trie_with_block_hash_for_shard( - shard_uid, - state_root, - &CryptoHash::default(), - false, - ); - for key in data_in_trie.keys() { - trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage).unwrap(); - } - assert_eq!(trie.get_trie_nodes_count(), TrieNodesCount { db_reads: 0, mem_reads: 0 }); - - // Now, let's capture the baseline node counts - this is what will happen - // in production. - let trie = tries.get_trie_with_block_hash_for_shard( - shard_uid, - state_root, - &CryptoHash::default(), - false, + // Now, let's check that when doing the same lookups with the captured partial storage, + // we still get the same counters. 
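+ // The proof recorded on top of the memtrie must also match the baseline
+ // proof node-for-node, which assert_partial_storage checks below.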
+ let partial_storage = trie.recorded_storage().unwrap(); + assert_partial_storage(&baseline_partial_storage, &partial_storage); + println!( + "Partial storage has {} nodes from {} entries", + partial_storage.nodes.len(), + data_in_trie.len() ); + let trie = + Trie::from_recorded_storage(partial_storage.clone(), state_root, use_flat_storage); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } for key in &keys_to_get_ref { assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) - .unwrap() - .map(|value| value.into_value_ref()), - data_in_trie.get(key).map(|value| ValueRef::new(&value)) - ); - } - let baseline_trie_nodes_count = trie.get_trie_nodes_count(); - println!("Baseline trie nodes count: {:?}", baseline_trie_nodes_count); - trie.update(updates.iter().cloned()).unwrap(); - - // Let's do this again, but this time recording reads. We'll make sure - // the counters are exactly the same even when we're recording. - let trie = tries - .get_trie_with_block_hash_for_shard( - shard_uid, - state_root, - &CryptoHash::default(), - false, - ) - .recording_reads(); - trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_get { - assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); - } - for key in &keys_to_get_ref { - assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) + trie.get_optimized_ref(key, lookup_mode) .unwrap() .map(|value| value.into_value_ref()), data_in_trie.get(key).map(|value| ValueRef::new(&value)) @@ -412,22 +374,16 @@ mod trie_recording_tests { assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); trie.update(updates.iter().cloned()).unwrap(); - // Now, let's check that when doing the same lookups with the captured partial storage, - // we still get the same counters. - let partial_storage = trie.recorded_storage().unwrap(); - println!( - "Partial storage has {} nodes from {} entries", - partial_storage.nodes.len(), - data_in_trie.len() - ); - let trie = Trie::from_recorded_storage(partial_storage, state_root, true); + // Build a Trie using recorded storage and enable recording_reads on this Trie + let trie = Trie::from_recorded_storage(partial_storage, state_root, use_flat_storage) + .recording_reads(); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } for key in &keys_to_get_ref { assert_eq!( - trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) + trie.get_optimized_ref(key, lookup_mode) .unwrap() .map(|value| value.into_value_ref()), data_in_trie.get(key).map(|value| ValueRef::new(&value)) @@ -435,8 +391,9 @@ mod trie_recording_tests { } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); trie.update(updates.iter().cloned()).unwrap(); + assert_partial_storage(&baseline_partial_storage, &trie.recorded_storage().unwrap()); - if use_in_memory_tries { + if !keys_to_get.is_empty() || !keys_to_get_ref.is_empty() { // sanity check that we did indeed use in-memory tries. 
assert!(MEM_TRIE_NUM_LOOKUPS.get() > mem_trie_lookup_counts_before);
 }
@@ -444,44 +401,42 @@ mod trie_recording_tests {
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache() {
- test_trie_recording_consistency_with_flat_storage(false, false, false);
+ fn test_trie_recording_consistency_no_accounting_cache() {
+ test_trie_recording_consistency(false, false, false);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache() {
- test_trie_recording_consistency_with_flat_storage(true, false, false);
+ fn test_trie_recording_consistency_with_accounting_cache() {
+ test_trie_recording_consistency(true, false, false);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache_with_missing_keys() {
- test_trie_recording_consistency_with_flat_storage(false, true, false);
+ fn test_trie_recording_consistency_no_accounting_cache_with_missing_keys() {
+ test_trie_recording_consistency(false, true, false);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache_and_missing_keys() {
- test_trie_recording_consistency_with_flat_storage(true, true, false);
+ fn test_trie_recording_consistency_with_accounting_cache_and_missing_keys() {
+ test_trie_recording_consistency(true, true, false);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_memtrie_no_accounting_cache() {
- test_trie_recording_consistency_with_flat_storage(false, false, true);
+ fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache() {
+ test_trie_recording_consistency(false, false, true);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_memtrie_with_accounting_cache() {
- test_trie_recording_consistency_with_flat_storage(true, false, true);
+ fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache() {
+ test_trie_recording_consistency(true, false, true);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_memtrie_no_accounting_cache_with_missing_keys(
- ) {
- test_trie_recording_consistency_with_flat_storage(false, true, true);
+ fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache_with_missing_keys() {
+ test_trie_recording_consistency(false, true, true);
 }
 #[test]
- fn test_trie_recording_consistency_with_flat_storage_memtrie_with_accounting_cache_and_missing_keys(
- ) {
- test_trie_recording_consistency_with_flat_storage(true, true, true);
+ fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache_and_missing_keys() {
+ test_trie_recording_consistency(true, true, true);
 }
 }
diff --git a/core/store/src/trie/trie_storage.rs b/core/store/src/trie/trie_storage.rs
index ae2154ccc28..3e177c5dc59 100644
--- a/core/store/src/trie/trie_storage.rs
+++ b/core/store/src/trie/trie_storage.rs
@@ -117,8 +117,11 @@ impl TrieCacheInner {
 shard_cache_deletions_size: metrics::SHARD_CACHE_DELETIONS_SIZE
 .with_label_values(&metrics_labels),
 };
+ // Assuming the values are actually all empty and we store a full hashmap of overhead.
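+ // Illustrative arithmetic (hypothetical numbers): with a 50 MB total_size_limit and a
+ // 100-byte PER_ENTRY_OVERHEAD this caps the map at ceil(50_000_000 / 100) = 500_000
+ // entries; the real bound follows from the configured shard cache size.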
+ let max_elements = total_size_limit.div_ceil(Self::PER_ENTRY_OVERHEAD);
+ let max_elements = usize::try_from(max_elements).unwrap();
 Self {
- cache: LruCache::unbounded(),
+ cache: LruCache::new(max_elements),
 deletions: BoundedQueue::new(deletions_queue_capacity),
 total_size: 0,
 total_size_limit,
diff --git a/core/store/src/trie/trie_tests.rs b/core/store/src/trie/trie_tests.rs
index 7a6cf242ef4..ebae5aae21f 100644
--- a/core/store/src/trie/trie_tests.rs
+++ b/core/store/src/trie/trie_tests.rs
@@ -419,36 +419,20 @@ mod trie_storage_tests {
 assert_eq!(count_delta.mem_reads, 1);
 }
- // Checks that when branch restructuring is triggered on updating trie,
- // impacted child is still recorded.
- //
- // Needed when branch has two children, one of which is removed, branch
- // could be converted to extension, so reading of the only remaining child
- // is also required.
- #[test]
- fn test_memtrie_recorded_branch_restructuring() {
+ fn test_memtrie_and_disk_updates_consistency(updates: Vec<(Vec<u8>, Option<Vec<u8>>)>) {
 init_test_logger();
+ let base_changes = vec![
+ (vec![7], Some(vec![1])),
+ (vec![7, 0], Some(vec![2])),
+ (vec![7, 1], Some(vec![3])),
+ ];
 let tries = TestTriesBuilder::new().build();
 let shard_uid = ShardUId::single_shard();
- let state_root = test_populate_trie(
- &tries,
- &Trie::EMPTY_ROOT,
- shard_uid,
- vec![
- (vec![7], Some(vec![1])),
- (vec![7, 0], Some(vec![2])),
- (vec![7, 1], Some(vec![3])),
- ],
- );
+ let state_root =
+ test_populate_trie(&tries, &Trie::EMPTY_ROOT, shard_uid, base_changes.clone());
 let trie = tries.get_trie_for_shard(shard_uid, state_root).recording_reads();
- let changes = trie
- .update(vec![
- (vec![7], Some(vec![10])),
- (vec![7, 0], None),
- (vec![7, 6], Some(vec![8])),
- ])
- .unwrap();
+ let changes = trie.update(updates.clone()).unwrap();
 tracing::info!("Changes: {:?}", changes);
 let recorded_normal = trie.recorded_storage();
@@ -469,29 +453,14 @@
 let tries = TestTriesBuilder::new()
 .with_store(store)
- .with_flat_storage()
+ .with_flat_storage(true)
 .with_in_memory_tries()
 .build();
 let shard_uid = ShardUId::single_shard();
- let state_root = test_populate_trie(
- &tries,
- &Trie::EMPTY_ROOT,
- shard_uid,
- vec![
- (vec![7], Some(vec![1])),
- (vec![7, 0], Some(vec![2])),
- (vec![7, 1], Some(vec![3])),
- ],
- );
+ let state_root = test_populate_trie(&tries, &Trie::EMPTY_ROOT, shard_uid, base_changes);
 let trie = tries.get_trie_for_shard(shard_uid, state_root).recording_reads();
- let changes = trie
- .update(vec![
- (vec![7], Some(vec![10])),
- (vec![7, 0], None),
- (vec![7, 6], Some(vec![8])),
- ])
- .unwrap();
+ let changes = trie.update(updates).unwrap();
 tracing::info!("Changes: {:?}", changes);
@@ -499,4 +468,29 @@
 assert_eq!(recorded_normal, recorded_memtrie);
 }
+
+ // Checks that when branch restructuring is triggered on updating trie,
+ // impacted child is recorded on memtrie.
+ //
+ // Needed when branch has two children, one of which is removed, branch
+ // could be converted to extension, so reading of the only remaining child
+ // is also required.
+ #[test]
+ fn test_memtrie_recorded_branch_restructuring() {
+ test_memtrie_and_disk_updates_consistency(vec![
+ (vec![7], Some(vec![10])),
+ (vec![7, 0], None),
+ (vec![7, 6], Some(vec![8])),
+ ]);
+ }
+
+ // Checks that when a non-existent key is removed, only the nodes along the path
+ // to it are recorded.
+ // Needed because old disk trie logic was always reading neighbouring children + // along the path to recompute memory usages, which is not needed if trie + // structure doesn't change. + #[test] + fn test_memtrie_recorded_delete_non_existent_key() { + test_memtrie_and_disk_updates_consistency(vec![(vec![8], None)]); + } } diff --git a/core/store/src/trie/update.rs b/core/store/src/trie/update.rs index eba79d8d589..2bb715fa6ad 100644 --- a/core/store/src/trie/update.rs +++ b/core/store/src/trie/update.rs @@ -115,6 +115,9 @@ impl TrieUpdate { pub fn remove(&mut self, trie_key: TrieKey) { self.prospective.insert(trie_key.to_vec(), TrieKeyValueUpdate { trie_key, value: None }); + if let Some(recorder) = &self.trie.recorder { + recorder.borrow_mut().record_removal(); + } } pub fn commit(&mut self, event: StateChangeCause) { @@ -137,11 +140,24 @@ impl TrieUpdate { /// This Function returns the [`Trie`] with which the [`TrieUpdate`] has been initially /// constructed. It can be reused to construct another `TrieUpdate` or to operate with `Trie` /// in any other way as desired. + #[tracing::instrument( + level = "debug", + target = "store::trie", + "TrieUpdate::finalize", + skip_all, + fields( + committed.len = self.committed.len(), + mem_reads = tracing::field::Empty, + db_reads = tracing::field::Empty + ) + )] pub fn finalize( self, ) -> Result<(Trie, TrieChanges, Vec), StorageError> { assert!(self.prospective.is_empty(), "Finalize cannot be called with uncommitted changes."); + let span = tracing::Span::current(); let TrieUpdate { trie, committed, .. } = self; + let start_counts = trie.accounting_cache.borrow().get_trie_nodes_count(); let mut state_changes = Vec::with_capacity(committed.len()); let trie_changes = trie.update(committed.into_iter().map(|(k, changes_with_trie_key)| { @@ -154,6 +170,11 @@ impl TrieUpdate { state_changes.push(changes_with_trie_key); (k, data) }))?; + let end_counts = trie.accounting_cache.borrow().get_trie_nodes_count(); + if let Some(iops_delta) = end_counts.checked_sub(&start_counts) { + span.record("mem_reads", iops_delta.mem_reads); + span.record("db_reads", iops_delta.db_reads); + } Ok((trie, trie_changes, state_changes)) } diff --git a/docs/architecture/gas/estimator.md b/docs/architecture/gas/estimator.md index 0f33dac3de9..b16ba54be69 100644 --- a/docs/architecture/gas/estimator.md +++ b/docs/architecture/gas/estimator.md @@ -8,7 +8,7 @@ all users collude to make the system as slow as possible. This benchmarking suite is used to check that the gas parameters defined in the protocol are correct. Correct in this context means, a chunk filled with 1 Pgas (**P**eta gas) will take at most 1 second to be applied. Or more generally, -per 1 Tgas of execution, we spend no more than 1ms wall-clock time. +per 1 Tgas of execution, we spend no more than 1ms wall-clock time. For now, nearcore timing is the only one that matters. Things will become more complicated once there are multiple client implementations. But knowing that @@ -84,9 +84,9 @@ variations. This gives an approximation for IO bytes, as seen on the interface between the operating system and nearcore. To convert to gas, we use three constants to multiply with instruction count, read bytes, and write bytes. -We run qemu inside a Docker container, to make sure the qemu and qemu plugin -versions match with system libraries. Make sure to add `--docker` when running -with `--metric icount`. 
+We run qemu inside a container, using the Podman runtime, to make sure the qemu and qemu
+plugin versions match with system libraries. Make sure to add `--containerize` when running with
+`--metric icount`.
 The great thing about `icount` is that you can run it on different machines and
 it will always return the same result. It is not 100% deterministic but very
diff --git a/docs/practices/testing/README.md b/docs/practices/testing/README.md
index 20ce69817bc..f8d283546b5 100644
--- a/docs/practices/testing/README.md
+++ b/docs/practices/testing/README.md
@@ -47,7 +47,7 @@ It requires nextest harness which can be installed by running `cargo install car
 Expensive and python tests are not part of CI, and are run by a custom nightly
 runner. The results of the latest runs are available
-[here](https://nayduck.near.org/#/). Today, test runs launch approximately
+[here](https://nayduck.nearone.org/#/). Today, test runs launch approximately
 every 5-6 hours. For the latest results look at the **second** run, since the
 first one has some tests still scheduled to run.
diff --git a/docs/practices/workflows/gas_estimations.md b/docs/practices/workflows/gas_estimations.md
index 10bde049697..ec2228a67c2 100644
--- a/docs/practices/workflows/gas_estimations.md
+++ b/docs/practices/workflows/gas_estimations.md
@@ -48,13 +48,13 @@ cargo run --release -p runtime-params-estimator --features required -- \
 ```
 You might also want to run a hardware-agnostic estimation using the following
-command. It uses `docker` and `qemu` under the hood, so it will be quite a bit
-slower. You will need to install `docker` to run this command.
+command. It uses `podman` and `qemu` under the hood, so it will be quite a bit
+slower. You will need to install `podman` to run this command.
 ```bash
 cargo run --release -p runtime-params-estimator --features required -- \
 --accounts-num 20000 --additional-accounts-num 2000000 \
- --iters 3 --warmup-iters 1 --metric icount --docker \
+ --iters 3 --warmup-iters 1 --metric icount --containerize \
 --costs=ActionReceiptCreation,ActionTransfer,ActionCreateAccount,ActionFunctionCallBase
 ```
diff --git a/docs/practices/workflows/io_trace.md b/docs/practices/workflows/io_trace.md
index 82c4715f702..f754ac88f94 100644
--- a/docs/practices/workflows/io_trace.md
+++ b/docs/practices/workflows/io_trace.md
@@ -15,7 +15,7 @@ slow receipts.
 ## Setup
-When compiling neard (or the paramater estimator) with `feature=io_trace` it
+When compiling neard (or the parameter estimator) with `feature=io_trace` it
 instruments the binary code with fine-grained database operations tracking.
 *Aside: We don't enable it by default because we are afraid the overhead could be
diff --git a/docs/practices/workflows/otel_traces.md b/docs/practices/workflows/otel_traces.md
index 9ed47a73fdf..2682fcddaad 100644
--- a/docs/practices/workflows/otel_traces.md
+++ b/docs/practices/workflows/otel_traces.md
@@ -64,13 +64,13 @@ If the traces are not coming through quite yet, consider using the ability to se
 configuration at runtime. Create `$NEARD_HOME/log_config.json` file with the following contents:
 ```json
-{ "opentelemetry_level": "INFO" }
+{ "opentelemetry": "info" }
 ```
 Or optionally with `rust_log` setting to reduce logging on stdout:
 ```json
-{ "opentelemetry_level": "INFO", "rust_log": "WARN" }
+{ "opentelemetry": "info", "rust_log": "WARN" }
 ```
 and invoke `sudo pkill -HUP neard`. Double check that the collector is running as well.
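For example, both settings can be written and reloaded in one step (a sketch; it assumes
`$NEARD_HOME` points at the node's home directory):

```bash
cat > "$NEARD_HOME/log_config.json" <<'EOF'
{ "opentelemetry": "info", "rust_log": "WARN" }
EOF
sudo pkill -HUP neard
```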
diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 94bcc438f73..2f3606f20ce 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -67,10 +67,14 @@ testlib.workspace = true [dev-dependencies] assert_matches.workspace = true +aurora-engine-transactions.workspace = true +aurora-engine-types.workspace = true derive-enum-from-into.workspace = true +ethabi.workspace = true insta.workspace = true near-undo-block.workspace = true rlp.workspace = true +sha3.workspace = true [features] performance_stats = [ diff --git a/integration-tests/src/runtime_utils.rs b/integration-tests/src/runtime_utils.rs index 3f5ef923473..acf239a8099 100644 --- a/integration-tests/src/runtime_utils.rs +++ b/integration-tests/src/runtime_utils.rs @@ -36,7 +36,7 @@ pub fn get_runtime_and_trie_from_genesis(genesis: &Genesis) -> (Runtime, ShardTr let shard_layout = &genesis.config.shard_layout; let tries = TestTriesBuilder::new() .with_shard_layout(shard_layout.version(), shard_layout.shard_ids().count() as NumShards) - .with_flat_storage() + .with_flat_storage(true) .build(); let runtime = Runtime::new(); let mut account_ids: HashSet = HashSet::new(); diff --git a/integration-tests/src/tests/client/challenges.rs b/integration-tests/src/tests/client/challenges.rs index 1b9a6af6b0c..e75fff55b1b 100644 --- a/integration-tests/src/tests/client/challenges.rs +++ b/integration-tests/src/tests/client/challenges.rs @@ -15,8 +15,9 @@ use near_primitives::challenge::{ use near_primitives::hash::CryptoHash; use near_primitives::merkle::PartialMerkleTree; use near_primitives::num_rational::Ratio; +use near_primitives::reed_solomon::ReedSolomonWrapper; use near_primitives::shard_layout::ShardUId; -use near_primitives::sharding::{EncodedShardChunk, ReedSolomonWrapper}; +use near_primitives::sharding::EncodedShardChunk; use near_primitives::stateless_validation::ChunkEndorsement; use near_primitives::test_utils::create_test_signer; use near_primitives::transaction::SignedTransaction; diff --git a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs index 75fe9f9df91..17beeee9ef1 100644 --- a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs +++ b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs @@ -19,7 +19,6 @@ fn test_account_id_in_function_call_permission_upgrade() { near_primitives::version::ProtocolFeature::AccountIdInFunctionCallPermission .protocol_version() - 1; - let new_protocol_version = old_protocol_version + 1; // Prepare TestEnv with a contract at the old protocol version. let mut env = { @@ -70,7 +69,7 @@ fn test_account_id_in_function_call_permission_upgrade() { } }; - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); // Re-run the transaction, now it fails due to invalid account id. 
{ diff --git a/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs b/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs index a2a64996902..956881d0014 100644 --- a/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs +++ b/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs @@ -44,7 +44,7 @@ fn unchanged_gas_cost() { let old_gas = old_result.receipts_outcome[0].outcome.gas_burnt; assert_matches!(old_result.status, FinalExecutionStatus::SuccessValue(_)); - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); let new_result = env.call_main(&account); let new_gas = new_result.receipts_outcome[0].outcome.gas_burnt; @@ -77,7 +77,7 @@ fn preparation_error_gas_cost() { let old_gas = old_result.receipts_outcome[0].outcome.gas_burnt; assert_matches!(old_result.status, FinalExecutionStatus::Failure(_)); - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); let new_result = env.call_main(&account); let new_gas = new_result.receipts_outcome[0].outcome.gas_burnt; @@ -87,7 +87,7 @@ fn preparation_error_gas_cost() { assert_ne!(old_gas, new_gas); // Runtime parameter values for version of the protocol upgrade let loading_base = 35_445_963; - let loading_byte = 216_750; + let loading_byte = 1_089_295; let loading_cost = loading_base + contract_size as u64 * loading_byte; assert_eq!(old_gas + loading_cost, new_gas); } diff --git a/integration-tests/src/tests/client/features/flat_storage.rs b/integration-tests/src/tests/client/features/flat_storage.rs index 15282191c10..d82d7c7befa 100644 --- a/integration-tests/src/tests/client/features/flat_storage.rs +++ b/integration-tests/src/tests/client/features/flat_storage.rs @@ -113,7 +113,7 @@ fn test_flat_storage_upgrade() { env.produce_block(0, tip.height + i + 1); } if i == 0 { - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); } let final_transaction_result = diff --git a/integration-tests/src/tests/client/features/increase_deployment_cost.rs b/integration-tests/src/tests/client/features/increase_deployment_cost.rs index 4c8bec93994..42ee74652db 100644 --- a/integration-tests/src/tests/client/features/increase_deployment_cost.rs +++ b/integration-tests/src/tests/client/features/increase_deployment_cost.rs @@ -47,7 +47,7 @@ fn test_deploy_cost_increased() { let tx = env.tx_from_actions(actions.clone(), &signer, signer.account_id.clone()); let old_outcome = env.execute_tx(tx).unwrap(); - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); let tx = env.tx_from_actions(actions, &signer, signer.account_id.clone()); let new_outcome = env.execute_tx(tx).unwrap(); diff --git a/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs b/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs index 0664efe3fc4..9407e3ebf32 100644 --- a/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs +++ b/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs @@ -27,7 +27,14 @@ use nearcore::test_utils::TestEnvNightshadeSetupExt; /// Tracked in https://github.com/near/nearcore/issues/8938 const INCREASED_STORAGE_COSTS_PROTOCOL_VERSION: u32 = 61; +enum Expectation { + ShouldFail, + ShouldSucceed, +} + /// Test that `storage_write` compute limit is respected in new version. +// TODO(10979): Fix and enable this test. 
+#[ignore] #[test] fn test_storage_write() { // `insert_strings(from: u64, to: u64)` makes (`to` - `from`) `storage_write` calls. @@ -37,7 +44,7 @@ fn test_storage_write() { 0u64.to_le_bytes().into_iter().chain(num_writes.to_le_bytes()).collect(); let num_transactions = 200; let uses_storage = true; - let fails = false; + let expectation = Expectation::ShouldSucceed; let gas_divider = 1; assert_compute_limit_reached( method_name, @@ -45,11 +52,13 @@ fn test_storage_write() { num_transactions, uses_storage, gas_divider, - fails, + expectation, ); } /// Test that `storage_remove` compute limit is respected in new version. +// TODO(10979): Fix and enable this test. +#[ignore] #[test] fn test_storage_remove() { // `delete_strings(from: u64, to: u64)` makes (`to` - `from`) `storage_remove` calls. @@ -59,7 +68,7 @@ fn test_storage_remove() { 0u64.to_le_bytes().into_iter().chain(num_deletes.to_le_bytes()).collect(); let num_transactions = 10; let uses_storage = true; - let fails = false; + let expectation = Expectation::ShouldSucceed; let gas_divider = 10; assert_compute_limit_reached( method_name, @@ -67,12 +76,14 @@ fn test_storage_remove() { num_transactions, uses_storage, gas_divider, - fails, + expectation, ); } /// Test that `storage_write` compute limit is respected in new version, /// specifically when running out of gas. +// TODO(10979): Fix and enable this test. +#[ignore] #[test] fn test_storage_write_gas_exceeded() { // `insert_strings(from: u64, to: u64)` makes (`to` - `from`) `storage_write` calls. @@ -83,7 +94,7 @@ fn test_storage_write_gas_exceeded() { 0u64.to_le_bytes().into_iter().chain(num_writes.to_le_bytes()).collect(); let num_transactions = 10; let uses_storage = true; - let fails = true; + let expectation = Expectation::ShouldFail; let gas_divider = 1; assert_compute_limit_reached( method_name, @@ -91,11 +102,13 @@ fn test_storage_write_gas_exceeded() { num_transactions, uses_storage, gas_divider, - fails, + expectation, ); } /// Check receipts that don't touch storage are unaffected by the new compute costs. +// TODO(10979): Fix and enable this test. +#[ignore] #[test] fn test_non_storage() { // `sum_n(u64)` just does some WASM computation. @@ -113,7 +126,7 @@ fn test_non_storage() { let method_args: Vec = 10_030_000u64.to_le_bytes().to_vec(); let num_transactions = 2; let uses_storage = false; - let fails: bool = false; + let expectation = Expectation::ShouldSucceed; let gas_divider = 10; assert_compute_limit_reached( method_name, @@ -121,11 +134,13 @@ fn test_non_storage() { num_transactions, uses_storage, gas_divider, - fails, + expectation, ); } /// Test the case where a function call fails and the limit is unaffected by compute costs. +// TODO(10979): Fix and enable this test. +#[ignore] #[test] fn test_non_storage_gas_exceeded() { // `loop_forever()` loops until either gas is exhausted. @@ -135,14 +150,14 @@ fn test_non_storage_gas_exceeded() { let num_transactions = 2; let uses_storage = false; let gas_divider = 10; - let fails = true; + let expectation = Expectation::ShouldFail; assert_compute_limit_reached( method_name, method_args, num_transactions, uses_storage, gas_divider, - fails, + expectation, ); } @@ -167,7 +182,7 @@ fn assert_compute_limit_reached( num_transactions: u64, uses_storage: bool, gas_divider: u64, - should_fail: bool, + expectation: Expectation, ) { // The immediate protocol upgrade needs to be set for this test to pass in // the release branch where the protocol upgrade date is set. 
@@ -225,7 +240,7 @@ fn assert_compute_limit_reached( method_name.clone(), method_args.clone(), num_transactions, - should_fail, + &expectation, old_config.as_ref(), &mut nonce, ); @@ -237,7 +252,7 @@ fn assert_compute_limit_reached( "should saturate gas limit, only burnt {gas_burnt} when limit was {gas_limit}" ); - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); let new_chunk = produce_saturated_chunk( &mut env, @@ -246,7 +261,7 @@ fn assert_compute_limit_reached( method_name, method_args, num_transactions, - should_fail, + &expectation, new_config.as_ref(), &mut nonce, ); @@ -283,7 +298,7 @@ fn produce_saturated_chunk( method_name: String, args: Vec, num_transactions: u64, - should_fail: bool, + expectation: &Expectation, config: &RuntimeConfig, nonce: &mut u64, ) -> std::sync::Arc { @@ -317,7 +332,7 @@ fn produce_saturated_chunk( // chunk being much cheaper than everything that follows. Which makes it // look like compute costs work even if they don't! let result = env.execute_tx(tx_factory()).unwrap(); - if !should_fail { + if let Expectation::ShouldSucceed = expectation { result.assert_success(); } @@ -392,7 +407,7 @@ fn produce_saturated_chunk( } // check all transactions are successfully executed (unless the test // explicitly wants failing receipts) - if !should_fail { + if let Expectation::ShouldSucceed = expectation { for id in tx_ids { env.clients[0].chain.get_final_transaction_result(&id).unwrap().assert_success(); } diff --git a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs index 82cddd4a345..a1d5ec92638 100644 --- a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs +++ b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs @@ -18,7 +18,6 @@ fn verify_contract_limits_upgrade( expected_prepare_err: PrepareError, ) { let old_protocol_version = feature.protocol_version() - 1; - let new_protocol_version = feature.protocol_version(); let epoch_length = 5; // Prepare TestEnv with a contract at the old protocol version. @@ -52,7 +51,7 @@ fn verify_contract_limits_upgrade( let account = "test0".parse().unwrap(); let old_outcome = env.call_main(&account); - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); let new_outcome = env.call_main(&account); diff --git a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs index 89c1b8eff76..1897c415307 100644 --- a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs +++ b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs @@ -24,7 +24,6 @@ fn protocol_upgrade() { let old_protocol_version = near_primitives::version::ProtocolFeature::LowerStorageKeyLimit.protocol_version() - 1; - let new_protocol_version = old_protocol_version + 1; let new_storage_key_limit = 2usize.pow(11); // 2 KB let args: Vec = vec![1u8; new_storage_key_limit + 1] .into_iter() @@ -95,7 +94,7 @@ fn protocol_upgrade() { assert_matches!(final_result.status, FinalExecutionStatus::SuccessValue(_)); } - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); // Re-run the transaction, check that execution fails. 
{ diff --git a/integration-tests/src/tests/client/features/multinode_test_loop_example.rs b/integration-tests/src/tests/client/features/multinode_test_loop_example.rs index 8ad9b1619bc..a5fc5a36ee6 100644 --- a/integration-tests/src/tests/client/features/multinode_test_loop_example.rs +++ b/integration-tests/src/tests/client/features/multinode_test_loop_example.rs @@ -1,12 +1,11 @@ use derive_enum_from_into::{EnumFrom, EnumTryInto}; use near_async::messaging::{noop, IntoMultiSender, IntoSender, MessageWithCallback, SendAsync}; use near_async::test_loop::adhoc::{handle_adhoc_events, AdhocEvent, AdhocEventSender}; -use near_async::test_loop::event_handler::{ - ignore_events, LoopEventHandler, LoopHandlerContext, TryIntoOrSelf, -}; +use near_async::test_loop::delay_sender::DelaySender; +use near_async::test_loop::event_handler::{ignore_events, LoopEventHandler, TryIntoOrSelf}; use near_async::test_loop::futures::{ - drive_async_computations, drive_delayed_action_runners, drive_futures, - TestLoopAsyncComputationEvent, TestLoopDelayedActionEvent, TestLoopTask, + drive_async_computations, drive_futures, TestLoopAsyncComputationEvent, + TestLoopDelayedActionEvent, TestLoopTask, }; use near_async::test_loop::TestLoopBuilder; use near_async::time::Duration; @@ -319,12 +318,8 @@ fn test_client_with_multi_test_loop() { test.register_handler(drive_async_computations().widen().for_index(idx)); // Delayed actions. - test.register_handler( - drive_delayed_action_runners::().widen().for_index(idx), - ); - test.register_handler( - drive_delayed_action_runners::().widen().for_index(idx), - ); + test.register_delayed_action_handler_for_index::(idx); + test.register_delayed_action_handler_for_index::(idx); // Messages to the client. test.register_handler( @@ -358,8 +353,12 @@ fn test_client_with_multi_test_loop() { } // Handles network routing. Outgoing messages are handled by emitting incoming messages to the // appropriate component of the appropriate node index. - test.register_handler(route_network_messages_to_client(NETWORK_DELAY)); - test.register_handler(route_shards_manager_network_messages(NETWORK_DELAY)); + test.register_handler(route_network_messages_to_client(test.sender(), NETWORK_DELAY)); + test.register_handler(route_shards_manager_network_messages( + test.sender(), + test.clock(), + NETWORK_DELAY, + )); // Bootstrap the test by starting the components. // We use adhoc events for these, just so that the visualizer can see these as events rather @@ -367,14 +366,16 @@ fn test_client_with_multi_test_loop() { // the send_adhoc_event part and the test would still work. for idx in 0..NUM_CLIENTS { let sender = test.sender().for_index(idx); + let shutting_down = test.shutting_down(); test.sender().for_index(idx).send_adhoc_event("start_client", move |data| { - data.client.start(&mut sender.into_delayed_action_runner()); + data.client.start(&mut sender.into_delayed_action_runner(shutting_down)); }); let sender = test.sender().for_index(idx); + let shutting_down = test.shutting_down(); test.sender().for_index(idx).send_adhoc_event("start_shards_manager", move |data| { data.shards_manager.periodically_resend_chunk_requests( - &mut sender.into_delayed_action_runner(), + &mut sender.into_delayed_action_runner(shutting_down), Duration::milliseconds(100), ); }) @@ -443,7 +444,7 @@ fn test_client_with_multi_test_loop() { // Give the test a chance to finish off remaining important events in the event loop, which can // be important for properly shutting down the nodes. 
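// `shutdown_and_drain_remaining_events` (used just below) is assumed to set the shared
// flag returned by `test.shutting_down()` and then drain the loop, while delayed-action
// runners built via `sender.into_delayed_action_runner(shutting_down)` consult the same
// flag so actions scheduled after shutdown are dropped instead of run. A minimal sketch
// of the assumed flag plumbing (the real type lives in near-async and may differ):
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

#[derive(Clone, Default)]
struct ShuttingDownFlag(Arc<AtomicBool>);

impl ShuttingDownFlag {
    /// Flipped once by shutdown_and_drain_remaining_events.
    fn set(&self) {
        self.0.store(true, Ordering::Relaxed);
    }
    /// Checked by delayed-action runners before firing an action.
    fn is_set(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }
}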
- test.finish_remaining_events(Duration::seconds(1)); + test.shutdown_and_drain_remaining_events(Duration::seconds(1)); } /// Handles outgoing network messages, and turns them into incoming client messages. @@ -453,112 +454,107 @@ pub fn route_network_messages_to_client< + From + From, >( + sender: DelaySender<(usize, Event)>, network_delay: Duration, ) -> LoopEventHandler { // let mut route_back_lookup: HashMap = HashMap::new(); // let mut next_hash: u64 = 0; - LoopEventHandler::new( - move |event: (usize, Event), - data: &mut Data, - context: &LoopHandlerContext<(usize, Event)>| { - let (idx, event) = event; - let message = event.try_into_or_self().map_err(|event| (idx, event.into()))?; - let PeerManagerMessageRequest::NetworkRequests(request) = message else { - return Err((idx, message.into())); - }; - - let client_senders = (0..data.num_accounts()) + LoopEventHandler::new(move |event: (usize, Event), data: &mut Data| { + let (idx, event) = event; + let message = event.try_into_or_self().map_err(|event| (idx, event.into()))?; + let PeerManagerMessageRequest::NetworkRequests(request) = message else { + return Err((idx, message.into())); + }; + + let client_senders = (0..data.num_accounts()) .map(|idx| { - context - .sender + sender .with_additional_delay(network_delay) .for_index(idx) .into_wrapped_multi_sender::() }) .collect::>(); - match request { - NetworkRequests::Block { block } => { - for other_idx in 0..data.num_accounts() { - if other_idx != idx { - drop(client_senders[other_idx].send_async(BlockResponse { - block: block.clone(), - peer_id: PeerId::random(), - was_requested: false, - })); - } - } - } - NetworkRequests::Approval { approval_message } => { - let other_idx = data.index_for_account(&approval_message.target); + match request { + NetworkRequests::Block { block } => { + for other_idx in 0..data.num_accounts() { if other_idx != idx { - drop(client_senders[other_idx].send_async(BlockApproval( - approval_message.approval, - PeerId::random(), - ))); - } else { - tracing::warn!("Dropping message to self"); + drop(client_senders[other_idx].send_async(BlockResponse { + block: block.clone(), + peer_id: PeerId::random(), + was_requested: false, + })); } } - NetworkRequests::ForwardTx(account, transaction) => { - let other_idx = data.index_for_account(&account); - if other_idx != idx { - drop(client_senders[other_idx].send_async(ProcessTxRequest { - transaction, - is_forwarded: true, - check_only: false, - })) - } else { - tracing::warn!("Dropping message to self"); - } + } + NetworkRequests::Approval { approval_message } => { + let other_idx = data.index_for_account(&approval_message.target); + if other_idx != idx { + drop( + client_senders[other_idx] + .send_async(BlockApproval(approval_message.approval, PeerId::random())), + ); + } else { + tracing::warn!("Dropping message to self"); } - NetworkRequests::ChunkEndorsement(target, endorsement) => { - let other_idx = data.index_for_account(&target); - if other_idx != idx { + } + NetworkRequests::ForwardTx(account, transaction) => { + let other_idx = data.index_for_account(&account); + if other_idx != idx { + drop(client_senders[other_idx].send_async(ProcessTxRequest { + transaction, + is_forwarded: true, + check_only: false, + })) + } else { + tracing::warn!("Dropping message to self"); + } + } + NetworkRequests::ChunkEndorsement(target, endorsement) => { + let other_idx = data.index_for_account(&target); + if other_idx != idx { + drop( + client_senders[other_idx].send_async(ChunkEndorsementMessage(endorsement)), + ); + } else { 
+ tracing::warn!("Dropping message to self"); + } + } + NetworkRequests::ChunkStateWitness(targets, witness) => { + let other_idxes = targets + .iter() + .map(|account| data.index_for_account(account)) + .collect::>(); + for other_idx in &other_idxes { + if *other_idx != idx { drop( - client_senders[other_idx] - .send_async(ChunkEndorsementMessage(endorsement)), + client_senders[*other_idx] + .send_async(ChunkStateWitnessMessage(witness.clone())), ); } else { - tracing::warn!("Dropping message to self"); - } - } - NetworkRequests::ChunkStateWitness(targets, witness) => { - let other_idxes = targets - .iter() - .map(|account| data.index_for_account(account)) - .collect::>(); - for other_idx in &other_idxes { - if *other_idx != idx { - drop( - client_senders[*other_idx] - .send_async(ChunkStateWitnessMessage(witness.clone())), - ); - } else { - tracing::warn!( + tracing::warn!( "ChunkStateWitness asked to send to nodes {:?}, but {} is ourselves, so skipping that", other_idxes, idx); - } } } - NetworkRequests::ChunkStateWitnessAck(target, witness_ack) => { - let other_idx = data.index_for_account(&target); - if other_idx != idx { - drop( - client_senders[other_idx] - .send_async(ChunkStateWitnessAckMessage(witness_ack)), - ); - } else { - tracing::warn!("Dropping state-witness-ack message to self"); - } + } + NetworkRequests::ChunkStateWitnessAck(target, witness_ack) => { + let other_idx = data.index_for_account(&target); + if other_idx != idx { + drop( + client_senders[other_idx] + .send_async(ChunkStateWitnessAckMessage(witness_ack)), + ); + } else { + tracing::warn!("Dropping state-witness-ack message to self"); } - // TODO: Support more network message types as we expand the test. - _ => return Err((idx, PeerManagerMessageRequest::NetworkRequests(request).into())), } + // TODO: Support more network message types as we expand the test. + _ => return Err((idx, PeerManagerMessageRequest::NetworkRequests(request).into())), + } - Ok(()) - }, - ) + Ok(()) + }) } // TODO: This would be a good starting point for turning this into a test util. diff --git a/integration-tests/src/tests/client/features/nearvm.rs b/integration-tests/src/tests/client/features/nearvm.rs index 3a2bbc1b1d1..51332f66bcc 100644 --- a/integration-tests/src/tests/client/features/nearvm.rs +++ b/integration-tests/src/tests/client/features/nearvm.rs @@ -17,7 +17,6 @@ fn test_nearvm_upgrade() { let old_protocol_version = near_primitives::version::ProtocolFeature::NearVmRuntime.protocol_version() - 1; - let new_protocol_version = old_protocol_version + 1; // Prepare TestEnv with a contract at the old protocol version. let mut env = { @@ -74,7 +73,7 @@ fn test_nearvm_upgrade() { capture.drain() }; - env.upgrade_protocol(new_protocol_version); + env.upgrade_protocol_to_latest_version(); // Re-run the transaction. 
let logs_at_new_version = { diff --git a/integration-tests/src/tests/client/features/orphan_chunk_state_witness.rs b/integration-tests/src/tests/client/features/orphan_chunk_state_witness.rs index aae878ad51b..e953b054757 100644 --- a/integration-tests/src/tests/client/features/orphan_chunk_state_witness.rs +++ b/integration-tests/src/tests/client/features/orphan_chunk_state_witness.rs @@ -9,15 +9,14 @@ use near_client::{Client, ProcessingDoneTracker, ProcessingDoneWaiter}; use near_crypto::Signature; use near_network::types::{NetworkRequests, PeerManagerMessageRequest}; use near_o11y::testonly::init_integration_logger; -use near_primitives::merkle::{Direction, MerklePathItem}; use near_primitives::sharding::{ ChunkHash, ReceiptProof, ShardChunkHeader, ShardChunkHeaderInner, ShardChunkHeaderInnerV2, ShardProof, }; -use near_primitives::stateless_validation::ChunkStateWitness; +use near_primitives::stateless_validation::EncodedChunkStateWitness; +use near_primitives::stateless_validation::SignedEncodedChunkStateWitness; +use near_primitives::types::AccountId; use near_primitives_core::checked_feature; -use near_primitives_core::hash::CryptoHash; -use near_primitives_core::types::AccountId; use near_primitives_core::version::PROTOCOL_VERSION; use nearcore::test_utils::TestEnvNightshadeSetupExt; @@ -25,7 +24,7 @@ struct OrphanWitnessTestEnv { env: TestEnv, block1: Block, block2: Block, - witness: ChunkStateWitness, + signed_witness: SignedEncodedChunkStateWitness, excluded_validator: AccountId, excluded_validator_idx: usize, chunk_producer: AccountId, @@ -158,7 +157,8 @@ fn setup_orphan_witness_test() -> OrphanWitnessTestEnv { } _ => Some(request), }); - let witness = witness_opt.unwrap(); + let signed_witness = witness_opt.unwrap(); + let witness = signed_witness.witness_bytes.decode().unwrap().0; env.propagate_chunk_endorsements(false); @@ -169,7 +169,7 @@ fn setup_orphan_witness_test() -> OrphanWitnessTestEnv { block2.header().height(), "There should be no missing chunks." ); - assert_eq!(witness.inner.chunk_header.chunk_hash(), block2.chunks()[0].chunk_hash()); + assert_eq!(witness.chunk_header.chunk_hash(), block2.chunks()[0].chunk_hash()); for client_idx in clients_without_excluded { let blocks_processed = env.clients[client_idx] @@ -187,7 +187,7 @@ fn setup_orphan_witness_test() -> OrphanWitnessTestEnv { env, block1, block2, - witness, + signed_witness, excluded_validator, excluded_validator_idx, chunk_producer: block2_chunk_producer, @@ -208,7 +208,7 @@ fn test_orphan_witness_valid() { mut env, block1, block2, - witness, + signed_witness, excluded_validator, excluded_validator_idx, .. @@ -216,7 +216,7 @@ fn test_orphan_witness_valid() { // `excluded_validator` receives witness for chunk belonging to `block2`, but it doesn't have `block1`. // The witness should become an orphaned witness and it should be saved to the orphan pool. - env.client(&excluded_validator).process_chunk_state_witness(witness, None).unwrap(); + env.client(&excluded_validator).process_chunk_state_witness(signed_witness, None).unwrap(); let block_processed = env .client(&excluded_validator) @@ -239,14 +239,16 @@ fn test_orphan_witness_bad_signature() { return; } - let OrphanWitnessTestEnv { mut env, mut witness, excluded_validator, .. } = + let OrphanWitnessTestEnv { mut env, mut signed_witness, excluded_validator, .. 
} = setup_orphan_witness_test(); // Modify the witness to contain an invalid signature - witness.signature = Signature::default(); + signed_witness.signature = Signature::default(); - let error = - env.client(&excluded_validator).process_chunk_state_witness(witness, None).unwrap_err(); + let error = env + .client(&excluded_validator) + .process_chunk_state_witness(signed_witness, None) + .unwrap_err(); let error_message = format!("{error}").to_lowercase(); tracing::info!(target:"test", "Error message: {}", error_message); assert!(error_message.contains("invalid signature")); @@ -261,15 +263,17 @@ fn test_orphan_witness_signature_from_wrong_peer() { return; } - let OrphanWitnessTestEnv { mut env, mut witness, excluded_validator, .. } = + let OrphanWitnessTestEnv { mut env, mut signed_witness, excluded_validator, .. } = setup_orphan_witness_test(); // Sign the witness using another validator's key. // Only witnesses from the chunk producer that produced this witness should be accepted. - resign_witness(&mut witness, env.client(&excluded_validator)); + resign_witness(&mut signed_witness, env.client(&excluded_validator)); - let error = - env.client(&excluded_validator).process_chunk_state_witness(witness, None).unwrap_err(); + let error = env + .client(&excluded_validator) + .process_chunk_state_witness(signed_witness, None) + .unwrap_err(); let error_message = format!("{error}").to_lowercase(); tracing::info!(target:"test", "Error message: {}", error_message); assert!(error_message.contains("invalid signature")); @@ -284,16 +288,23 @@ fn test_orphan_witness_invalid_shard_id() { return; } - let OrphanWitnessTestEnv { mut env, mut witness, excluded_validator, chunk_producer, .. } = - setup_orphan_witness_test(); + let OrphanWitnessTestEnv { + mut env, + mut signed_witness, + excluded_validator, + chunk_producer, + .. + } = setup_orphan_witness_test(); // Set invalid shard_id in the witness header - modify_witness_header_inner(&mut witness, |header| header.shard_id = 10000000); - resign_witness(&mut witness, env.client(&chunk_producer)); + modify_witness_header_inner(&mut signed_witness, |header| header.shard_id = 10000000); + resign_witness(&mut signed_witness, env.client(&chunk_producer)); // The witness should be rejected - let error = - env.client(&excluded_validator).process_chunk_state_witness(witness, None).unwrap_err(); + let error = env + .client(&excluded_validator) + .process_chunk_state_witness(signed_witness, None) + .unwrap_err(); let error_message = format!("{error}").to_lowercase(); tracing::info!(target:"test", "Error message: {}", error_message); assert!(error_message.contains("shard")); @@ -308,23 +319,18 @@ fn test_orphan_witness_too_large() { return; } - let OrphanWitnessTestEnv { mut env, mut witness, excluded_validator, chunk_producer, .. } = + let OrphanWitnessTestEnv { mut env, signed_witness, excluded_validator, .. 
} = setup_orphan_witness_test(); - // Modify the witness to be larger than the allowed limit - let dummy_merkle_path_item = - MerklePathItem { hash: CryptoHash::default(), direction: Direction::Left }; - let max_size_usize: usize = - default_orphan_state_witness_max_size().as_u64().try_into().unwrap(); - let items_count = max_size_usize / std::mem::size_of::<MerklePathItem>() + 1; - let big_path = vec![dummy_merkle_path_item; items_count]; - let big_receipt_proof = - ReceiptProof(Vec::new(), ShardProof { from_shard_id: 0, to_shard_id: 0, proof: big_path }); - witness.inner.source_receipt_proofs.insert(ChunkHash::default(), big_receipt_proof); - resign_witness(&mut witness, env.client(&chunk_producer)); - + let witness = signed_witness.witness_bytes.decode().unwrap().0; // The witness should not be saved to the pool, as it's too big - let outcome = env.client(&excluded_validator).handle_orphan_state_witness(witness).unwrap(); + let outcome = env + .client(&excluded_validator) + .handle_orphan_state_witness( + witness, + default_orphan_state_witness_max_size().as_u64() as usize + 1, + ) + .unwrap(); assert!(matches!(outcome, HandleOrphanWitnessOutcome::TooBig(_))) } @@ -340,7 +346,7 @@ fn test_orphan_witness_far_from_head() { let OrphanWitnessTestEnv { mut env, - mut witness, + mut signed_witness, chunk_producer, block1, excluded_validator, @@ -348,10 +354,12 @@ fn test_orphan_witness_far_from_head() { } = setup_orphan_witness_test(); let bad_height = 10000; - modify_witness_header_inner(&mut witness, |header| header.height_created = bad_height); - resign_witness(&mut witness, env.client(&chunk_producer)); + modify_witness_header_inner(&mut signed_witness, |header| header.height_created = bad_height); + resign_witness(&mut signed_witness, env.client(&chunk_producer)); - let outcome = env.client(&excluded_validator).handle_orphan_state_witness(witness).unwrap(); + let witness = signed_witness.witness_bytes.decode().unwrap().0; + let outcome = + env.client(&excluded_validator).handle_orphan_state_witness(witness, 2000).unwrap(); assert_eq!( outcome, HandleOrphanWitnessOutcome::TooFarFromHead { @@ -373,39 +381,48 @@ fn test_orphan_witness_not_fully_validated() { return; } - let OrphanWitnessTestEnv { mut env, mut witness, chunk_producer, excluded_validator, .. } = - setup_orphan_witness_test(); + let OrphanWitnessTestEnv { + mut env, + mut signed_witness, + chunk_producer, + excluded_validator, + .. + } = setup_orphan_witness_test(); + let mut witness = signed_witness.witness_bytes.decode().unwrap().0; // Make the witness invalid in a way that won't be detected during orphan witness validation - witness.inner.source_receipt_proofs.insert( + witness.source_receipt_proofs.insert( ChunkHash::default(), ReceiptProof( vec![], ShardProof { from_shard_id: 100230230, to_shard_id: 383939, proof: vec![] }, ), ); - resign_witness(&mut witness, env.client(&chunk_producer)); + signed_witness.witness_bytes = EncodedChunkStateWitness::encode(&witness).unwrap().0; + resign_witness(&mut signed_witness, env.client(&chunk_producer)); // The witness should be accepted and saved into the pool, even though it's invalid. // There is no way to fully validate an orphan witness, so this is the correct behavior. // The witness will later be fully validated when the required block arrives.
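// These tests treat the signed witness roughly as the pair sketched below. The real
// definitions live in `near_primitives::stateless_validation` and may differ in field
// types and methods; this shape is only inferred from the call sites above:
pub struct SignedEncodedChunkStateWitness {
    /// Borsh-serialized (possibly compressed) `ChunkStateWitness`.
    pub witness_bytes: EncodedChunkStateWitness,
    pub signature: Signature,
}
// Mutating a witness is therefore a decode -> edit -> re-encode -> re-sign round-trip,
// which is exactly what `modify_witness_header_inner` and `resign_witness` below do.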
- env.client(&excluded_validator).process_chunk_state_witness(witness, None).unwrap(); + env.client(&excluded_validator).process_chunk_state_witness(signed_witness, None).unwrap(); } fn modify_witness_header_inner( - witness: &mut ChunkStateWitness, + signed_witness: &mut SignedEncodedChunkStateWitness, f: impl FnOnce(&mut ShardChunkHeaderInnerV2), ) { - match &mut witness.inner.chunk_header { + let mut witness = signed_witness.witness_bytes.decode().unwrap().0; + match &mut witness.chunk_header { ShardChunkHeader::V3(header) => match &mut header.inner { ShardChunkHeaderInner::V2(header_inner) => f(header_inner), _ => panic!(), }, _ => panic!(), }; + signed_witness.witness_bytes = EncodedChunkStateWitness::encode(&witness).unwrap().0; } -fn resign_witness(witness: &mut ChunkStateWitness, signer: &Client) { +fn resign_witness(witness: &mut SignedEncodedChunkStateWitness, signer: &Client) { witness.signature = - signer.validator_signer.as_ref().unwrap().sign_chunk_state_witness(&witness.inner).0; + signer.validator_signer.as_ref().unwrap().sign_chunk_state_witness(&witness.witness_bytes); } diff --git a/integration-tests/src/tests/client/features/simple_test_loop_example.rs b/integration-tests/src/tests/client/features/simple_test_loop_example.rs index 0aa5bd13a4a..7c9ad296d63 100644 --- a/integration-tests/src/tests/client/features/simple_test_loop_example.rs +++ b/integration-tests/src/tests/client/features/simple_test_loop_example.rs @@ -1,9 +1,9 @@ use derive_enum_from_into::{EnumFrom, EnumTryInto}; -use near_async::futures::DelayedActionRunnerExt; use near_async::messaging::{noop, IntoMultiSender, IntoSender}; +use near_async::test_loop::adhoc::{handle_adhoc_events, AdhocEvent, AdhocEventSender}; use near_async::test_loop::futures::{ - drive_async_computations, drive_delayed_action_runners, drive_futures, - TestLoopAsyncComputationEvent, TestLoopDelayedActionEvent, TestLoopTask, + drive_async_computations, drive_futures, TestLoopAsyncComputationEvent, + TestLoopDelayedActionEvent, TestLoopTask, }; use near_async::test_loop::TestLoopBuilder; use near_async::time::Duration; @@ -54,10 +54,17 @@ struct TestData { pub shards_manager: ShardsManager, } +impl AsMut<TestData> for TestData { + fn as_mut(&mut self) -> &mut Self { + self + } +} + #[derive(EnumTryInto, Debug, EnumFrom)] #[allow(clippy::large_enum_variant)] enum TestEvent { Task(Arc<TestLoopTask>), + Adhoc(AdhocEvent<TestData>), AsyncComputation(TestLoopAsyncComputationEvent), ClientDelayedActions(TestLoopDelayedActionEvent), ClientEventFromNetwork(ClientSenderForNetworkMessage), @@ -230,18 +237,17 @@ fn test_client_with_simple_test_loop() { .widen(), ); test.register_handler(drive_futures().widen()); + test.register_handler(handle_adhoc_events::<TestData>().widen()); test.register_handler(drive_async_computations().widen()); - test.register_handler(drive_delayed_action_runners::().widen()); + test.register_delayed_action_handler::(); test.register_handler(forward_client_request_to_shards_manager().widen()); // TODO: handle additional events.
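// `handle_adhoc_events::<TestData>()` (registered above) runs named closures delivered
// through the event loop. `AdhocEvent` is assumed to look roughly like the sketch below,
// which is why `TestData` gains an `AsMut<TestData>` impl: the generic handler only needs
// `data.as_mut()` to reach the concrete state (the real type in near-async may differ):
pub struct AdhocEvent<Data: 'static> {
    pub description: &'static str,
    pub handler: Box<dyn FnOnce(&mut Data) + Send + 'static>,
}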
- test.sender().into_delayed_action_runner::().run_later( - "start_client", - Duration::ZERO, - |client, runner| { - client.start(runner); - }, - ); + let mut delayed_runner = + test.sender().into_delayed_action_runner::(test.shutting_down()); + test.sender().send_adhoc_event("start_client", move |data| { + data.client.start(&mut delayed_runner); + }); test.run_for(Duration::seconds(10)); - test.finish_remaining_events(Duration::seconds(1)); + test.shutdown_and_drain_remaining_events(Duration::seconds(1)); } diff --git a/integration-tests/src/tests/client/features/stateless_validation.rs b/integration-tests/src/tests/client/features/stateless_validation.rs index 0491e706354..c31fe409ac2 100644 --- a/integration-tests/src/tests/client/features/stateless_validation.rs +++ b/integration-tests/src/tests/client/features/stateless_validation.rs @@ -1,5 +1,7 @@ use near_epoch_manager::{EpochManager, EpochManagerAdapter}; -use near_primitives::stateless_validation::ChunkStateWitness; +use near_primitives::stateless_validation::{ + ChunkStateWitness, EncodedChunkStateWitness, SignedEncodedChunkStateWitness, +}; use near_store::test_utils::create_test_store; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; @@ -331,10 +333,14 @@ fn test_chunk_state_witness_bad_shard_id() { let previous_block = env.clients[0].chain.head().unwrap().prev_block_hash; let invalid_shard_id = 1000000000; let witness = ChunkStateWitness::new_dummy(upper_height, invalid_shard_id, previous_block); + let signed_witness = SignedEncodedChunkStateWitness { + witness_bytes: EncodedChunkStateWitness::encode(&witness).unwrap().0, + signature: Default::default(), + }; // Client should reject this ChunkStateWitness and the error message should mention "shard" tracing::info!(target: "test", "Processing invalid ChunkStateWitness"); - let res = env.clients[0].process_chunk_state_witness(witness, None); + let res = env.clients[0].process_chunk_state_witness(signed_witness, None); let error = res.unwrap_err(); let error_message = format!("{}", error).to_lowercase(); tracing::info!(target: "test", "error message: {}", error_message); diff --git a/integration-tests/src/tests/client/features/wallet_contract.rs b/integration-tests/src/tests/client/features/wallet_contract.rs index 9666c2a61a5..2c3f8e68fab 100644 --- a/integration-tests/src/tests/client/features/wallet_contract.rs +++ b/integration-tests/src/tests/client/features/wallet_contract.rs @@ -1,12 +1,15 @@ use assert_matches::assert_matches; +use aurora_engine_transactions::eip_2930::Transaction2930; +use aurora_engine_transactions::EthTransactionKind; +use aurora_engine_types::types::{Address, Wei}; +use ethabi::ethereum_types::U256; use near_chain_configs::{Genesis, NEAR_BASE}; use near_client::{test_utils::TestEnv, ProcessTxResponse}; -use near_crypto::{InMemorySigner, KeyType, SecretKey}; -use near_primitives::errors::{ - ActionError, ActionErrorKind, FunctionCallError, InvalidAccessKeyError, InvalidTxError, - TxExecutionError, -}; -use near_primitives::test_utils::eth_implicit_test_account; +use near_crypto::{InMemorySigner, KeyType, PublicKey, SecretKey}; +use near_primitives::account::id::AccountIdRef; +use near_primitives::account::{AccessKeyPermission, FunctionCallPermission}; +use near_primitives::errors::{InvalidAccessKeyError, InvalidTxError}; +use near_primitives::test_utils::{create_user_test_signer, eth_implicit_test_account}; use near_primitives::transaction::{ Action, AddKeyAction, DeployContractAction, FunctionCallAction, SignedTransaction, TransferAction, @@ 
-23,13 +26,9 @@ use near_vm_runner::ContractCode; use near_wallet_contract::{wallet_contract, wallet_contract_magic_bytes}; use nearcore::test_utils::TestEnvNightshadeSetupExt; use node_runtime::ZERO_BALANCE_ACCOUNT_STORAGE_LIMIT; -use rlp::RlpStream; -use testlib::runtime_utils::{alice_account, bob_account, carol_account}; +use testlib::runtime_utils::{alice_account, bob_account}; -use crate::{ - node::{Node, RuntimeNode}, - tests::client::process_blocks::produce_blocks_from_height, -}; +use crate::tests::client::process_blocks::produce_blocks_from_height; /// Try to process tx in the next blocks, check that tx and all generated receipts succeed. /// Return height of the next block. @@ -43,6 +42,7 @@ fn check_tx_processing( assert_eq!(env.clients[0].process_tx(tx, false, false), ProcessTxResponse::ValidTx); let next_height = produce_blocks_from_height(env, blocks_number, height); let final_outcome = env.clients[0].chain.get_final_transaction_result(&tx_hash).unwrap(); + println!("{final_outcome:?}"); assert_matches!(final_outcome.status, FinalExecutionStatus::SuccessValue(_)); next_height } @@ -65,6 +65,22 @@ fn view_request(env: &TestEnv, request: QueryRequest) -> QueryResponse { .unwrap() } +fn view_balance(env: &TestEnv, account: &AccountIdRef) -> u128 { + let request = QueryRequest::ViewAccount { account_id: account.into() }; + match view_request(&env, request).kind { + QueryResponseKind::ViewAccount(view) => view.amount, + _ => panic!("wrong query response"), + } +} + +fn view_nonce(env: &TestEnv, account: &AccountIdRef, pk: PublicKey) -> u64 { + let request = QueryRequest::ViewAccessKey { account_id: account.into(), public_key: pk }; + match view_request(&env, request).kind { + QueryResponseKind::AccessKey(view) => view.nonce, + _ => panic!("wrong query response"), + } +} + /// Tests that ETH-implicit account is created correctly, with Wallet Contract hash. #[test] fn test_eth_implicit_account_creation() { @@ -74,6 +90,7 @@ fn test_eth_implicit_account_creation() { let genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); let mut env = TestEnv::builder(&genesis.config).nightshade_runtimes(&genesis).build(); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); + let chain_id = &genesis.config.chain_id; let signer = InMemorySigner::from_seed("test0".parse().unwrap(), KeyType::ED25519, "test0"); let eth_implicit_account_id = eth_implicit_test_account(); @@ -92,7 +109,7 @@ fn test_eth_implicit_account_creation() { env.produce_block(0, i); } - let magic_bytes = wallet_contract_magic_bytes(); + let magic_bytes = wallet_contract_magic_bytes(chain_id); // Verify the ETH-implicit account has zero balance and appropriate code hash. // Check that the account storage fits within zero balance account limit. @@ -127,6 +144,7 @@ fn test_transaction_from_eth_implicit_account_fail() { let genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); let mut env = TestEnv::builder(&genesis.config).nightshade_runtimes(&genesis).build(); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); + let chain_id = &genesis.config.chain_id; let deposit_for_account_creation = NEAR_BASE; let mut height = 1; let blocks_number = 5; @@ -200,7 +218,7 @@ fn test_transaction_from_eth_implicit_account_fail() { assert_eq!(response, expected_tx_error); // Try to deploy the Wallet Contract again to the ETH-implicit account. Should fail because there is no access key. 
- let wallet_contract_code = wallet_contract().code().to_vec(); + let wallet_contract_code = wallet_contract(chain_id).code().to_vec(); let add_access_key_to_eth_implicit_account_tx = SignedTransaction::from_actions( nonce, eth_implicit_account_id.clone(), @@ -214,126 +232,258 @@ fn test_transaction_from_eth_implicit_account_fail() { assert_eq!(response, expected_tx_error); } -// TODO(eth-implicit) Remove this test and replace it with tests that directly call the `Wallet Contract` when it is ready. -/// Creating an ETH-implicit account with meta-transaction, then attempting to use it with another meta-transaction. -/// -/// The `create_account` parameter controls whether we create ETH-implicit account -/// before attempting to use it by making a function call. -/// Depending on `rlp_transaction` blob that is sent to the `Wallet Contract` -/// the transaction is either authorized or unauthorized. -/// The `authorized` parameter controls which case will be tested. -fn meta_tx_call_wallet_contract(create_account: bool, authorized: bool) { +#[test] +fn test_wallet_contract_interaction() { if !checked_feature!("stable", EthImplicitAccounts, PROTOCOL_VERSION) { return; } - let genesis = Genesis::test(vec![alice_account(), bob_account(), carol_account()], 3); + + let genesis = Genesis::test(vec!["test0".parse().unwrap(), alice_account(), bob_account()], 1); + let mut env = TestEnv::builder(&genesis.config).nightshade_runtimes(&genesis).build(); + + let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); + let mut height = 1; + let blocks_number = 10; + + // As the relayer, alice will be sending Near transactions which + // contain the Ethereum transactions the user signs. let relayer = alice_account(); - let node = RuntimeNode::new_from_genesis(&relayer, genesis); - let sender = bob_account(); + let mut relayer_signer = + NearSigner { account_id: &relayer, signer: create_user_test_signer(&relayer) }; + // Bob will receive a $NEAR transfer from the eth implicit account + let receiver = bob_account(); + + // Generate an eth implicit account for the user let secret_key = SecretKey::from_seed(KeyType::SECP256K1, "test"); let public_key = secret_key.public_key(); let eth_implicit_account = derive_eth_implicit_account_id(public_key.unwrap_as_secp256k1()); - let other_public_key = SecretKey::from_seed(KeyType::SECP256K1, "test2").public_key(); - - // Although ETH-implicit account can be zero-balance, we pick 1 here in order to make transfer later from this account. - let transfer_amount = 1u128; - let actions = vec![Action::Transfer(TransferAction { deposit: transfer_amount })]; - - if create_account { - // Create ETH-implicit account by funding it. - node.user() - .meta_tx(sender.clone(), eth_implicit_account.clone(), relayer.clone(), actions) - .unwrap() - .assert_success(); - } - let target = carol_account(); - let initial_balance = node.view_balance(&target).expect("failed looking up balance"); - - // TODO(eth-implicit) Append appropriate values to the RLP stream when proper `Wallet Contract` is implemented. - let mut stream = RlpStream::new_list(3); - stream.append(&target.as_str()); - // The RLP trait `Encodable` is not implemented for `u128`. We must encode it as bytes. - // TODO(eth-implicit) Do not try to encode `u128` values directly, see https://github.com/near/nearcore/pull/10269#discussion_r1425585051.
- stream.append(&transfer_amount.to_be_bytes().as_slice()); - if authorized { - stream.append(&public_key.key_data()); - } else { - stream.append(&other_public_key.key_data()); - } - let rlp_encoded_data = stream.out().to_vec(); + // Create ETH-implicit account by funding it. + // Although an ETH-implicit account can be zero-balance, we pick a non-zero amount + // here in order to make a transfer from this account later. + let deposit_for_account_creation = NEAR_BASE; + let actions = vec![Action::Transfer(TransferAction { deposit: deposit_for_account_creation })]; + let nonce = + view_nonce(&env, relayer_signer.account_id, relayer_signer.signer.public_key.clone()) + 1; + let block_hash = *genesis_block.hash(); + let signed_transaction = SignedTransaction::from_actions( + nonce, + relayer.clone(), + eth_implicit_account.clone(), + &relayer_signer.signer, + actions, + block_hash, + ); + height = check_tx_processing(&mut env, signed_transaction, height, blocks_number); - let args = serde_json::json!({ - "target": target.to_string(), - "rlp_transaction": rlp_encoded_data, - }) - .to_string() + // The relayer adds its key to the eth implicit account so that it + // can sign Near transactions for the user. + let relayer_pk = relayer_signer.signer.public_key.clone(); + let action = Action::AddKey(Box::new(AddKeyAction { + public_key: relayer_pk, + access_key: AccessKey { + nonce: 0, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: eth_implicit_account.to_string(), + method_names: vec!["rlp_execute".into()], + }), + }, + })); + let signed_transaction = create_rlp_execute_tx( + &eth_implicit_account, + action, + 0, + &eth_implicit_account, + &secret_key, + &mut relayer_signer, + &env, + ); + height = check_tx_processing(&mut env, signed_transaction, height, blocks_number); + + // Now the relayer can sign transactions for the implicit account directly + relayer_signer.account_id = &eth_implicit_account; + + let init_wallet_balance = view_balance(&env, &eth_implicit_account); + let init_receiver_balance = view_balance(&env, &receiver); + + // The user signs a transaction to transfer some $NEAR + let transfer_amount = NEAR_BASE / 7; + let action = Action::Transfer(TransferAction { deposit: transfer_amount }); + let signed_transaction = create_rlp_execute_tx( + &receiver, + action, + 1, + &eth_implicit_account, + &secret_key, + &mut relayer_signer, + &env, + ); + check_tx_processing(&mut env, signed_transaction, height, blocks_number); + + let final_wallet_balance = view_balance(&env, &eth_implicit_account); + let final_receiver_balance = view_balance(&env, &receiver); + + assert_eq!(final_receiver_balance - init_receiver_balance, transfer_amount); + let wallet_balance_diff = init_wallet_balance - final_wallet_balance; + // Wallet balance is a little lower due to gas fees.
+ assert!(wallet_balance_diff - transfer_amount < NEAR_BASE / 500); +} + +fn create_rlp_execute_tx( + target: &AccountIdRef, + mut action: Action, + nonce: u64, + eth_implicit_account: &AccountIdRef, + secret_key: &SecretKey, + near_signer: &mut NearSigner<'_>, + env: &TestEnv, +) -> SignedTransaction { + const CHAIN_ID: u64 = 399; + // handles 24 vs 18 decimal mismatch between $NEAR and $ETH + const MAX_YOCTO_NEAR: u128 = 1_000_000; + + // Construct Eth transaction from user's intended action + let value = match &mut action { + Action::Transfer(tx) => { + let raw_amount = tx.deposit; + tx.deposit = raw_amount % MAX_YOCTO_NEAR; + Wei::new_u128(raw_amount / MAX_YOCTO_NEAR) + } + Action::FunctionCall(fn_call) => { + let raw_amount = fn_call.deposit; + fn_call.deposit = raw_amount % MAX_YOCTO_NEAR; + Wei::new_u128(raw_amount / MAX_YOCTO_NEAR) + } + _ => Wei::zero(), + }; + let tx_data = abi_encode(target.to_string(), action); + let transaction = Transaction2930 { + chain_id: CHAIN_ID, + nonce: nonce.into(), + gas_price: U256::zero(), + gas_limit: U256::zero(), + to: Some(derive_address(target)), + value, + data: tx_data, + access_list: Vec::new(), + }; + let signed_tx = sign_eth_transaction(transaction, &secret_key); + let signed_tx_bytes: Vec = (&signed_tx).into(); + let tx_bytes_b64 = near_primitives::serialize::to_base64(&signed_tx_bytes); + let args = format!( + r#"{{ + "target": "{target}", + "tx_bytes_b64": "{tx_bytes_b64}" + }}"# + ) .into_bytes(); + // Construct Near transaction to `rlp_execute` method let actions = vec![Action::FunctionCall(Box::new(FunctionCallAction { - method_name: "execute_rlp".to_owned(), + method_name: "rlp_execute".into(), args, - gas: 30_000_000_000_000, + gas: 300_000_000_000_000, deposit: 0, }))]; - // Call Wallet Contract with JSON-encoded arguments: `target` and `rlp_transaction`. The `rlp_transaction`'s value is RLP-encoded. - let tx_result = - node.user().meta_tx(sender, eth_implicit_account.clone(), relayer, actions).unwrap(); - let wallet_contract_call_result = &tx_result.receipts_outcome[1].outcome.status; - - if create_account && authorized { - // If the public key recovered from the RLP transaction's signature is valid for this ETH-implicit account, - // the transaction will succeed. `target`'s balance will increase by `transfer_amount`. - tx_result.assert_success(); - let final_balance = node.view_balance(&target).expect("failed looking up balance"); - assert_eq!(final_balance, initial_balance + transfer_amount); - return; - } + let nonce = view_nonce(env, near_signer.account_id, near_signer.signer.public_key.clone()) + 1; + let block_hash = *env.clients[0].chain.get_head_block().unwrap().hash(); + SignedTransaction::from_actions( + nonce, + near_signer.account_id.into(), + eth_implicit_account.into(), + &near_signer.signer, + actions, + block_hash, + ) +} - if create_account { - // The public key recovered from the RLP transaction's signature isn't valid for this ETH-implicit account. - // The Wallet Contract will reject this transaction. - let expected_error = near_primitives::views::ExecutionStatusView::Failure( - TxExecutionError::ActionError( - ActionError { - index: Some(0), - kind: ActionErrorKind::FunctionCallError { - 0: FunctionCallError::ExecutionError( - "Smart contract panicked: Public key does not match the Wallet Contract address." - .to_string() - ) - } - } - ) - ); - assert_eq!(wallet_contract_call_result, &expected_error); - } else { - // The Wallet Contract function call is not executed because the account does not exist. 
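// `MAX_YOCTO_NEAR` above bridges the decimal mismatch: $NEAR amounts carry 24 decimals
// (yoctoNEAR) while Ethereum values carry 18 (wei), so a deposit is split into a
// wei-scale quotient plus a sub-wei remainder that stays on the Near action. A
// self-contained worked example of that arithmetic:
fn split_deposit(yocto_near: u128) -> (u128, u128) {
    const MAX_YOCTO_NEAR: u128 = 1_000_000; // 10^24 / 10^18
    (yocto_near / MAX_YOCTO_NEAR, yocto_near % MAX_YOCTO_NEAR)
}
// 1 NEAR = 10^24 yoctoNEAR maps to exactly 10^18 wei-scale units with no remainder:
// assert_eq!(split_deposit(10u128.pow(24)), (10u128.pow(18), 0));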
- let expected_error = near_primitives::views::ExecutionStatusView::Failure( - TxExecutionError::ActionError(ActionError { - index: Some(0), - kind: ActionErrorKind::AccountDoesNotExist { account_id: eth_implicit_account }, - }), - ); - assert_eq!(wallet_contract_call_result, &expected_error); +struct NearSigner<'a> { + account_id: &'a AccountIdRef, + signer: InMemorySigner, +} + +fn abi_encode(target: String, action: Action) -> Vec { + const ADD_KEY_SELECTOR: &[u8] = &[0x75, 0x3c, 0xe5, 0xab]; + const TRANSFER_SELECTOR: &[u8] = &[0x3e, 0xd6, 0x41, 0x24]; + + let mut buf = Vec::new(); + match action { + Action::AddKey(add_key) => { + buf.extend_from_slice(ADD_KEY_SELECTOR); + let (public_key_kind, public_key) = match add_key.public_key { + PublicKey::ED25519(key) => (0, key.as_ref().to_vec()), + PublicKey::SECP256K1(key) => (1, key.as_ref().to_vec()), + }; + let nonce = add_key.access_key.nonce; + let (is_full_access, is_limited_allowance, allowance, receiver_id, method_names) = + match add_key.access_key.permission { + AccessKeyPermission::FullAccess => (true, false, 0, String::new(), Vec::new()), + AccessKeyPermission::FunctionCall(permission) => ( + false, + permission.allowance.is_some(), + permission.allowance.unwrap_or_default(), + permission.receiver_id, + permission.method_names, + ), + }; + let tokens = &[ + ethabi::Token::Uint(public_key_kind.into()), + ethabi::Token::Bytes(public_key), + ethabi::Token::Uint(nonce.into()), + ethabi::Token::Bool(is_full_access), + ethabi::Token::Bool(is_limited_allowance), + ethabi::Token::Uint(allowance.into()), + ethabi::Token::String(receiver_id), + ethabi::Token::Array(method_names.into_iter().map(ethabi::Token::String).collect()), + ]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + Action::Transfer(tx) => { + buf.extend_from_slice(TRANSFER_SELECTOR); + let tokens = &[ethabi::Token::String(target), ethabi::Token::Uint(tx.deposit.into())]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + _ => unimplemented!(), } + buf } -/// Wallet Contract function call is rejected because the ETH-implicit account does not exist. -#[test] -fn meta_tx_call_wallet_contract_account_does_not_exist() { - meta_tx_call_wallet_contract(false, true); +fn sign_eth_transaction(transaction: Transaction2930, sk: &SecretKey) -> EthTransactionKind { + let mut rlp_stream = rlp::RlpStream::new(); + rlp_stream.append(&aurora_engine_transactions::eip_2930::TYPE_BYTE); + transaction.rlp_append_unsigned(&mut rlp_stream); + let message_hash = keccak256(rlp_stream.as_raw()); + let signature = sk.sign(&message_hash); + let bytes: [u8; 65] = match signature { + near_crypto::Signature::SECP256K1(x) => x.into(), + _ => panic!("Expected SECP256K1 key"), + }; + let v = bytes[64]; + let r = U256::from_big_endian(&bytes[0..32]); + let s = U256::from_big_endian(&bytes[32..64]); + let signed_transaction = aurora_engine_transactions::eip_2930::SignedTransaction2930 { + transaction, + parity: v, + r, + s, + }; + EthTransactionKind::Eip2930(signed_transaction) } -/// Wallet Contract function call fails because the provided public key does not match the ETH-implicit address. -#[test] -fn meta_tx_call_wallet_contract_unauthorized() { - meta_tx_call_wallet_contract(true, false); +fn keccak256(bytes: &[u8]) -> [u8; 32] { + use sha3::{Digest, Keccak256}; + + Keccak256::digest(bytes).into() } -/// Wallet Contract function call is executed successfully. 
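// The hard-coded ADD_KEY_SELECTOR / TRANSFER_SELECTOR byte strings above follow the
// usual Solidity ABI convention: the first four bytes of keccak256 over the function
// signature. The wallet contract's exact method signatures are not shown in this diff,
// so the helper below is illustrative only (it reuses the `keccak256` defined above):
fn selector(signature: &str) -> [u8; 4] {
    let hash = keccak256(signature.as_bytes());
    hash[..4].try_into().expect("keccak256 output is 32 bytes")
}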
-#[test] -fn meta_tx_call_wallet_contract_authorized() { - meta_tx_call_wallet_contract(true, true); +fn derive_address(account_id: &AccountIdRef) -> Address { + let bytes = if account_id.as_str().starts_with("0x") { + let buf = hex::decode(&account_id.as_str()[2..42]).expect("account_id is hex encoded"); + return Address::try_from_slice(&buf).expect("slice is correct size"); + } else { + account_id.as_bytes() + }; + let hash = keccak256(bytes); + Address::try_from_slice(&hash[12..32]).expect("slice is correct size") } diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index 9377a167ce3..2f9ff147e94 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -2421,7 +2421,7 @@ fn test_catchup_gas_price_change() { } }); env.clients[1].chain.schedule_apply_state_parts(0, sync_hash, num_parts, &f).unwrap(); - env.clients[1].chain.set_state_finalize(0, sync_hash, Ok(())).unwrap(); + env.clients[1].chain.set_state_finalize(0, sync_hash).unwrap(); let chunk_extra_after_sync = env.clients[1].chain.get_chunk_extra(blocks[4].hash(), &ShardUId::single_shard()).unwrap(); let expected_chunk_extra = @@ -2703,7 +2703,7 @@ fn test_execution_metadata() { { "cost_category": "WASM_HOST_COST", "cost": "CONTRACT_LOADING_BYTES", - "gas_used": "18423750" + "gas_used": "92590075" }, { "cost_category": "WASM_HOST_COST", diff --git a/integration-tests/src/tests/client/resharding.rs b/integration-tests/src/tests/client/resharding.rs index 045f7e5e4db..aeccb3c75bf 100644 --- a/integration-tests/src/tests/client/resharding.rs +++ b/integration-tests/src/tests/client/resharding.rs @@ -21,7 +21,9 @@ use near_primitives::types::{BlockHeight, NumShards, ProtocolVersion, ShardId}; use near_primitives::utils::MaybeValidated; use near_primitives::version::ProtocolFeature; use near_primitives::version::PROTOCOL_VERSION; -use near_primitives::views::{ExecutionStatusView, FinalExecutionStatus, QueryRequest}; +use near_primitives::views::{ + ExecutionStatusView, FinalExecutionOutcomeView, FinalExecutionStatus, QueryRequest, +}; use near_primitives_core::num_rational::Rational32; use near_store::flat::FlatStorageStatus; use near_store::metadata::DbKind; @@ -36,6 +38,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::sync::Arc; use tracing::debug; +#[cfg(feature = "nightly")] +use near_parameters::RuntimeConfig; + const SIMPLE_NIGHTSHADE_PROTOCOL_VERSION: ProtocolVersion = ProtocolFeature::SimpleNightshade.protocol_version(); @@ -46,6 +51,10 @@ const SIMPLE_NIGHTSHADE_V2_PROTOCOL_VERSION: ProtocolVersion = const SIMPLE_NIGHTSHADE_V3_PROTOCOL_VERSION: ProtocolVersion = ProtocolFeature::SimpleNightshadeV3.protocol_version(); +#[cfg(feature = "nightly")] +const SIMPLE_NIGHTSHADE_TESTONLY_PROTOCOL_VERSION: ProtocolVersion = + ProtocolFeature::SimpleNightshadeTestonly.protocol_version(); + const P_CATCHUP: f64 = 0.2; #[derive(Clone, Copy)] @@ -57,6 +66,9 @@ enum ReshardingType { // In the V2->V3 resharding outgoing receipts are reassigned to lowest index child. #[cfg(not(feature = "statelessnet_protocol"))] V3, + // In V3->TESTONLY resharding outgoing receipts are reassigned to lowest index child. 
+ #[cfg(feature = "nightly")] + TESTONLY, } fn get_target_protocol_version(resharding_type: &ReshardingType) -> ProtocolVersion { @@ -65,16 +77,13 @@ fn get_target_protocol_version(resharding_type: &ReshardingType) -> ProtocolVers ReshardingType::V2 => SIMPLE_NIGHTSHADE_V2_PROTOCOL_VERSION, #[cfg(not(feature = "statelessnet_protocol"))] ReshardingType::V3 => SIMPLE_NIGHTSHADE_V3_PROTOCOL_VERSION, + #[cfg(feature = "nightly")] + ReshardingType::TESTONLY => SIMPLE_NIGHTSHADE_TESTONLY_PROTOCOL_VERSION, } } fn get_genesis_protocol_version(resharding_type: &ReshardingType) -> ProtocolVersion { - match resharding_type { - ReshardingType::V1 => SIMPLE_NIGHTSHADE_PROTOCOL_VERSION - 1, - ReshardingType::V2 => SIMPLE_NIGHTSHADE_V2_PROTOCOL_VERSION - 1, - #[cfg(not(feature = "statelessnet_protocol"))] - ReshardingType::V3 => SIMPLE_NIGHTSHADE_V3_PROTOCOL_VERSION - 1, - } + get_target_protocol_version(resharding_type) - 1 } fn get_parent_shard_uids(resharding_type: &ReshardingType) -> Vec { @@ -83,6 +92,8 @@ fn get_parent_shard_uids(resharding_type: &ReshardingType) -> Vec { ReshardingType::V2 => ShardLayout::get_simple_nightshade_layout(), #[cfg(not(feature = "statelessnet_protocol"))] ReshardingType::V3 => ShardLayout::get_simple_nightshade_layout_v2(), + #[cfg(feature = "nightly")] + ReshardingType::TESTONLY => ShardLayout::get_simple_nightshade_layout_v3(), }; shard_layout.shard_uids().collect() } @@ -99,6 +110,8 @@ fn get_expected_shards_num( ReshardingType::V2 => 4, #[cfg(not(feature = "statelessnet_protocol"))] ReshardingType::V3 => 5, + #[cfg(feature = "nightly")] + ReshardingType::TESTONLY => 6, } } else { match resharding_type { @@ -106,6 +119,8 @@ fn get_expected_shards_num( ReshardingType::V2 => 5, #[cfg(not(feature = "statelessnet_protocol"))] ReshardingType::V3 => 6, + #[cfg(feature = "nightly")] + ReshardingType::TESTONLY => 7, } } } @@ -511,8 +526,11 @@ impl TestReshardingEnv { /// This functions checks that the outcomes of all transactions and associated receipts /// have successful status /// If `allow_not_started` is true, allow transactions status to be NotStarted - /// Return successful transaction hashes - fn check_tx_outcomes(&mut self, allow_not_started: bool) -> Vec { + /// Returns a map from successful transaction hashes to the transaction outcomes + fn check_tx_outcomes( + &mut self, + allow_not_started: bool, + ) -> HashMap { tracing::debug!(target: "test", "checking tx outcomes"); let env = &mut self.env; let head = env.clients[0].chain.head().unwrap(); @@ -528,7 +546,7 @@ impl TestReshardingEnv { txs_to_check.extend(txs); } - let mut successful_txs = Vec::new(); + let mut successful_txs = HashMap::new(); for tx in txs_to_check { let id = &tx.get_hash(); @@ -556,19 +574,24 @@ impl TestReshardingEnv { continue; } let final_outcome = client.chain.get_final_transaction_result(id).unwrap(); + for outcome in &final_outcome.receipts_outcome { + assert_matches!( + outcome.outcome.status, + ExecutionStatusView::SuccessValue(_) + | ExecutionStatusView::SuccessReceiptId(_) + ); + } let outcome_status = final_outcome.status.clone(); if matches!(outcome_status, FinalExecutionStatus::SuccessValue(_)) { - successful_txs.push(tx.get_hash()); + successful_txs.insert(tx.get_hash(), final_outcome); } else { tracing::error!(target: "test", tx=?id, client=i, "tx failed"); panic!("tx failed {:?}", final_outcome); } - for outcome in final_outcome.receipts_outcome { - assert_matches!(outcome.outcome.status, ExecutionStatusView::SuccessValue(_)); - } } } + successful_txs } @@ -818,6 +841,17 @@ fn 
check_outgoing_receipts_reassigned_impl( assert!(outgoing_receipts.is_empty()); } } + #[cfg(feature = "nightly")] + ReshardingType::TESTONLY => { + // In V3->TESTONLY resharding the outgoing receipts should be reassigned + // to the lowest index child of the parent shard. + // We can't directly check that here but we can check that the + // non-lowest-index shards are not assigned any receipts. + // We check elsewhere that no receipts are lost so this should be sufficient. + if shard_id == 5 { + assert!(outgoing_receipts.is_empty()); + } + } } } @@ -1065,6 +1099,24 @@ fn test_shard_layout_upgrade_simple_v3_seed_44() { test_shard_layout_upgrade_simple_impl(ReshardingType::V3, 44, false); } +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_simple_testonly_seed_42() { + test_shard_layout_upgrade_simple_impl(ReshardingType::TESTONLY, 42, false); +} + +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_simple_testonly_seed_43() { + test_shard_layout_upgrade_simple_impl(ReshardingType::TESTONLY, 43, false); +} + +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_simple_testonly_seed_44() { + test_shard_layout_upgrade_simple_impl(ReshardingType::TESTONLY, 44, false); +} + fn test_resharding_with_different_db_kind_impl(resharding_type: ReshardingType) { init_test_logger(); @@ -1110,6 +1162,12 @@ fn test_resharding_with_different_db_kind_v3() { test_resharding_with_different_db_kind_impl(ReshardingType::V3); } +#[cfg(feature = "nightly")] +#[test] +fn test_resharding_with_different_db_kind_testonly() { + test_resharding_with_different_db_kind_impl(ReshardingType::TESTONLY); +} + /// In this test we are checking whether we are properly deleting trie state and flat state /// from the old shard layout after resharding. 
This is handled as a part of Garbage Collection (GC) fn test_shard_layout_upgrade_gc_impl(resharding_type: ReshardingType, rng_seed: u64) { @@ -1164,6 +1222,12 @@ fn test_shard_layout_upgrade_gc_v3() { test_shard_layout_upgrade_gc_impl(ReshardingType::V3, 44); } +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_gc_testonly() { + test_shard_layout_upgrade_gc_impl(ReshardingType::TESTONLY, 44); +} + const GAS_1: u64 = 300_000_000_000_000; const GAS_2: u64 = GAS_1 / 3; @@ -1417,7 +1481,7 @@ fn test_shard_layout_upgrade_cross_contract_calls_impl( let successful_txs = test_env.check_tx_outcomes(false); let new_accounts = - successful_txs.iter().flat_map(|tx_hash| new_accounts.get(tx_hash)).collect(); + successful_txs.iter().flat_map(|(tx_hash, _)| new_accounts.get(tx_hash)).collect(); test_env.check_accounts(new_accounts); @@ -1440,7 +1504,6 @@ fn test_shard_layout_upgrade_cross_contract_calls_v2_seed_42() { // Test cross contract calls // This test case tests postponed receipts and delayed receipts -#[cfg(not(feature = "statelessnet_protocol"))] #[test] fn test_shard_layout_upgrade_cross_contract_calls_v2_seed_43() { test_shard_layout_upgrade_cross_contract_calls_impl(ReshardingType::V2, 43); @@ -1477,6 +1540,171 @@ fn test_shard_layout_upgrade_cross_contract_calls_v3_seed_44() { test_shard_layout_upgrade_cross_contract_calls_impl(ReshardingType::V3, 44); } +// Test cross contract calls +// This test case tests postponed receipts and delayed receipts +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_cross_contract_calls_testonly_seed_42() { + test_shard_layout_upgrade_cross_contract_calls_impl(ReshardingType::TESTONLY, 42); +} + +// Test cross contract calls +// This test case tests postponed receipts and delayed receipts +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_cross_contract_calls_testonly_seed_43() { + test_shard_layout_upgrade_cross_contract_calls_impl(ReshardingType::TESTONLY, 43); +} + +// Test cross contract calls +// This test case tests postponed receipts and delayed receipts +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_cross_contract_calls_testonly_seed_44() { + test_shard_layout_upgrade_cross_contract_calls_impl(ReshardingType::TESTONLY, 44); +} + +#[cfg(feature = "nightly")] +fn generate_yield_create_tx( + account_id: &AccountId, + callback_method_name: String, + nonce: u64, + block_hash: &CryptoHash, +) -> SignedTransaction { + let signer = + InMemorySigner::from_seed(account_id.clone(), KeyType::ED25519, account_id.as_ref()); + + SignedTransaction::from_actions( + nonce, + account_id.clone(), + account_id.clone(), + &signer, + vec![Action::FunctionCall(Box::new(FunctionCallAction { + method_name: callback_method_name, + args: vec![], + gas: GAS_1, + deposit: 0, + }))], + *block_hash, + ) +} + +#[cfg(feature = "nightly")] +fn setup_test_env_with_promise_yield_txs( + test_env: &mut TestReshardingEnv, + epoch_length: u64, +) -> Vec { + let genesis_hash = *test_env.env.clients[0].chain.genesis_block().hash(); + + // Generates an account for each shard + let contract_accounts = gen_shard_accounts(); + + // Add transactions deploying nightly_rs_contract to each account + let mut init_txs = vec![]; + for account_id in &contract_accounts { + let signer = + InMemorySigner::from_seed(account_id.clone(), KeyType::ED25519, account_id.as_ref()); + let actions = vec![Action::DeployContract(DeployContractAction { + code: near_test_contracts::nightly_rs_contract().to_vec(), + })]; + let init_tx = 
SignedTransaction::from_actions( + 1, + account_id.clone(), + account_id.clone(), + &signer, + actions, + genesis_hash, + ); + init_txs.push(init_tx); + } + test_env.set_init_tx(init_txs); + + let mut yield_tx_hashes = vec![]; + let mut nonce = 100; + + // In these tests we set the epoch length equal to the yield timeout length. + assert!( + epoch_length + == RuntimeConfig::test().wasm_config.limit_config.yield_timeout_length_in_blocks, + ); + // Add transactions invoking promise_yield_create near the epoch boundaries. + for height in [ + epoch_length - 2, // create in first epoch, trigger timeout during resharding epoch + epoch_length - 1, + epoch_length, + 2 * epoch_length - 2, // create during resharding epoch, trigger timeout on upgraded layout + 2 * epoch_length - 1, + 2 * epoch_length, + 3 * epoch_length - 2, // both the create and the timeout will occur on upgraded layout + 3 * epoch_length - 1, + 3 * epoch_length, + ] { + let mut txs = vec![]; + for account_id in &contract_accounts { + let tx = generate_yield_create_tx( + account_id, + "call_yield_create_return_promise".to_string(), + nonce, + &genesis_hash, + ); + + nonce += 1; + + yield_tx_hashes.push(tx.get_hash()); + txs.push(tx); + } + + test_env.set_tx_at_height(height, txs); + } + + yield_tx_hashes +} + +// Test delivery of promise yield timeouts +#[cfg(feature = "nightly")] +fn test_shard_layout_upgrade_promise_yield_impl(resharding_type: ReshardingType, rng_seed: u64) { + init_test_logger(); + + // setup + let epoch_length = + RuntimeConfig::test().wasm_config.limit_config.yield_timeout_length_in_blocks; + let genesis_protocol_version = get_genesis_protocol_version(&resharding_type); + let target_protocol_version = get_target_protocol_version(&resharding_type); + + // reuse the test env for cross contract calls + let mut test_env = create_test_env_for_cross_contract_test( + genesis_protocol_version, + epoch_length, + rng_seed, + Some(resharding_type), + ); + + let yield_tx_hashes = setup_test_env_with_promise_yield_txs(&mut test_env, epoch_length); + + let drop_chunk_condition = DropChunkCondition::new(); + for _ in 1..5 * epoch_length { + test_env.step(&drop_chunk_condition, target_protocol_version); + test_env.check_receipt_id_to_shard_id(); + } + + let tx_outcomes = test_env.check_tx_outcomes(false); + for tx_hash in yield_tx_hashes { + // The yield callback returns a specific value when it is invoked by timeout + assert_eq!( + tx_outcomes.get(&tx_hash).unwrap().status, + FinalExecutionStatus::SuccessValue(vec![23u8]), + ); + } + + test_env.check_resharding_artifacts(0); +} + +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_promise_yield() { + test_shard_layout_upgrade_promise_yield_impl(ReshardingType::TESTONLY, 42); +} + fn test_shard_layout_upgrade_incoming_receipts_impl( resharding_type: ReshardingType, rng_seed: u64, @@ -1517,7 +1745,7 @@ fn test_shard_layout_upgrade_incoming_receipts_impl( let successful_txs = test_env.check_tx_outcomes(false); let new_accounts = - successful_txs.iter().flat_map(|tx_hash| new_accounts.get(tx_hash)).collect(); + successful_txs.iter().flat_map(|(tx_hash, _)| new_accounts.get(tx_hash)).collect(); test_env.check_accounts(new_accounts); test_env.check_resharding_artifacts(0); @@ -1565,6 +1793,24 @@ fn test_shard_layout_upgrade_incoming_receipts_v3_seed_44() { test_shard_layout_upgrade_incoming_receipts_impl(ReshardingType::V3, 44); } +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_incoming_receipts_testonly_seed_42() { + 
test_shard_layout_upgrade_incoming_receipts_impl(ReshardingType::TESTONLY, 42); +} + +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_incoming_receipts_testonly_seed_43() { + test_shard_layout_upgrade_incoming_receipts_impl(ReshardingType::TESTONLY, 43); +} + +#[cfg(feature = "nightly")] +#[test] +fn test_shard_layout_upgrade_incoming_receipts_testonly_seed_44() { + test_shard_layout_upgrade_incoming_receipts_impl(ReshardingType::TESTONLY, 44); +} + // Test cross contract calls // This test case tests when there are missing chunks in the produced blocks // This is to test that all the chunk management logic is correct, e.g. inclusion of outgoing @@ -1608,7 +1854,7 @@ fn test_missing_chunks( let successful_txs = test_env.check_tx_outcomes(true); let new_accounts: Vec<_> = - successful_txs.iter().flat_map(|tx_hash| new_accounts.get(tx_hash)).collect(); + successful_txs.iter().flat_map(|(tx_hash, _)| new_accounts.get(tx_hash)).collect(); test_env.check_accounts(new_accounts); test_env.check_resharding_artifacts(0); @@ -1806,13 +2052,13 @@ fn test_shard_layout_upgrade_error_handling_impl( } // corrupt the state snapshot if available to make resharding fail - currupt_state_snapshot(&test_env); + corrupt_state_snapshot(&test_env); } assert!(false, "no error was recorded, something is wrong in error handling"); } -fn currupt_state_snapshot(test_env: &TestReshardingEnv) { +fn corrupt_state_snapshot(test_env: &TestReshardingEnv) { let tries = test_env.env.clients[0].runtime_adapter.get_tries(); let Ok(snapshot_hash) = tries.get_state_snapshot_hash() else { return }; let (store, flat_storage_manager) = tries.get_state_snapshot(&snapshot_hash).unwrap(); diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs index 79602a8400a..7ef4e359d33 100644 --- a/integration-tests/src/tests/client/state_dump.rs +++ b/integration-tests/src/tests/client/state_dump.rs @@ -301,7 +301,7 @@ fn run_state_sync_with_dumped_parts( .apply_state_part(0, &state_root, PartId::new(part_id, num_parts), &part, &epoch_id) .unwrap(); } - env.clients[1].chain.set_state_finalize(0, sync_hash, Ok(())).unwrap(); + env.clients[1].chain.set_state_finalize(0, sync_hash).unwrap(); tracing::info!("syncing node: state sync finished."); let synced_block = env.clients[1].chain.get_block(&sync_hash).unwrap(); diff --git a/integration-tests/src/tests/client/state_snapshot.rs b/integration-tests/src/tests/client/state_snapshot.rs index ee73b5855c9..8f8d78cd9f6 100644 --- a/integration-tests/src/tests/client/state_snapshot.rs +++ b/integration-tests/src/tests/client/state_snapshot.rs @@ -40,11 +40,7 @@ impl StateSnaptshotTestEnv { let trie_config = TrieConfig { shard_cache_config: trie_cache_config.clone(), view_shard_cache_config: trie_cache_config, - enable_receipt_prefetching: false, - sweat_prefetch_receivers: Vec::new(), - sweat_prefetch_senders: Vec::new(), - load_mem_tries_for_shards: Vec::new(), - load_mem_tries_for_tracked_shards: false, + ..TrieConfig::default() }; let flat_storage_manager = FlatStorageManager::new(store.clone()); let shard_uids = [ShardUId::single_shard()]; diff --git a/integration-tests/src/tests/client/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs index 1c22a39a2a7..eb19a133a55 100644 --- a/integration-tests/src/tests/client/sync_state_nodes.rs +++ b/integration-tests/src/tests/client/sync_state_nodes.rs @@ -690,7 +690,7 @@ fn test_dump_epoch_missing_chunk_in_last_block() { } }); 
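// Editor's aside (illustrative, not part of this patch): the hunks around this
// point drop the third `Result` argument from `Chain::set_state_finalize`, so
// callers now pass only the shard id and the sync hash. A minimal before/after
// sketch, reusing the surrounding test context (`env`, `sync_hash`):
//
//     // before: env.clients[1].chain.set_state_finalize(0, sync_hash, Ok(())).unwrap();
//     // after:  env.clients[1].chain.set_state_finalize(0, sync_hash).unwrap();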
env.clients[1].chain.schedule_apply_state_parts(0, sync_hash, num_parts, &f).unwrap(); - env.clients[1].chain.set_state_finalize(0, sync_hash, Ok(())).unwrap(); + env.clients[1].chain.set_state_finalize(0, sync_hash).unwrap(); let last_chunk_height = epoch_length - num_last_chunks_missing; for height in 1..epoch_length { if height < last_chunk_height { diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap index 8cfca701ac1..69980566408 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap @@ -47,7 +47,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "ACTION_COST", cost: "FUNCTION_CALL_BASE", - gas_used: 20878753500000, + gas_used: 1800000000000, }, CostGasUsed { cost_category: "ACTION_COST", diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap index 8cfca701ac1..69980566408 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap @@ -47,7 +47,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "ACTION_COST", cost: "FUNCTION_CALL_BASE", - gas_used: 20878753500000, + gas_used: 1800000000000, }, CostGasUsed { cost_category: "ACTION_COST", diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap index ed45e974852..1bccd3904ba 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap @@ -12,7 +12,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "WASM_HOST_COST", cost: "CONTRACT_LOADING_BYTES", - gas_used: 8236500, + gas_used: 41393210, }, CostGasUsed { cost_category: "WASM_HOST_COST", diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic_nightly.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic_nightly.snap index ed45e974852..1bccd3904ba 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic_nightly.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic_nightly.snap @@ -12,7 +12,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "WASM_HOST_COST", cost: 
"CONTRACT_LOADING_BYTES", - gas_used: 8236500, + gas_used: 41393210, }, CostGasUsed { cost_category: "WASM_HOST_COST", diff --git a/integration-tests/src/tests/runtime/test_evil_contracts.rs b/integration-tests/src/tests/runtime/test_evil_contracts.rs index 92348c94520..9e50447fac0 100644 --- a/integration-tests/src/tests/runtime/test_evil_contracts.rs +++ b/integration-tests/src/tests/runtime/test_evil_contracts.rs @@ -104,7 +104,7 @@ fn test_self_delay() { 0, ) .unwrap(); - let expected_max_depth = 61u32; + let expected_max_depth = 60u32; assert_eq!( res.status, FinalExecutionStatus::SuccessValue(expected_max_depth.to_be_bytes().to_vec()), diff --git a/integration-tests/src/tests/runtime/test_yield_resume.rs b/integration-tests/src/tests/runtime/test_yield_resume.rs index fbdb5093d9a..67cbe853502 100644 --- a/integration-tests/src/tests/runtime/test_yield_resume.rs +++ b/integration-tests/src/tests/runtime/test_yield_resume.rs @@ -49,7 +49,7 @@ fn create_then_resume() { .function_call( "alice.near".parse().unwrap(), "test_contract".parse().unwrap(), - "call_yield_create", + "call_yield_create_return_data_id", yield_payload.clone(), MAX_GAS, 0, diff --git a/integration-tests/src/tests/standard_cases/mod.rs b/integration-tests/src/tests/standard_cases/mod.rs index 17332756d4b..a61a45bcac6 100644 --- a/integration-tests/src/tests/standard_cases/mod.rs +++ b/integration-tests/src/tests/standard_cases/mod.rs @@ -71,7 +71,8 @@ pub fn test_smart_contract_simple(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(10i32.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -114,7 +115,8 @@ pub fn test_smart_contract_self_call(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(10i32.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -138,7 +140,8 @@ pub fn test_smart_contract_bad_method_name(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -162,7 +165,8 @@ pub fn test_smart_contract_empty_method_name_with_no_tokens(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -209,7 +213,8 @@ pub fn test_smart_contract_with_args(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(5u64.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -265,7 +270,8 @@ pub fn test_upload_contract(node: impl Node) { ) .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - 
assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); node_user.view_contract_code(&eve_dot_alice_account()).expect_err( "RpcError { code: -32000, message: \"Server error\", data: Some(String(\"contract code of account eve.alice.near does not exist while viewing\")) }"); @@ -311,7 +317,8 @@ pub fn test_send_money(node: impl Node) { let transaction_result = node_user.send_money(account_id.clone(), bob_account(), money_used).unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -560,7 +567,8 @@ pub fn test_create_account(node: impl Node) { let create_account_cost = fee_helper.create_account_transfer_full_key_cost(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -593,7 +601,8 @@ pub fn test_create_account_again(node: impl Node) { .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let fee_helper = fee_helper(&node); let create_account_cost = fee_helper.create_account_transfer_full_key_cost(); @@ -1015,7 +1024,8 @@ pub fn test_access_key_smart_contract(node: impl Node) { prepaid_gas + exec_gas - transaction_result.receipts_outcome[0].outcome.gas_burnt, ); - assert_eq!(transaction_result.receipts_outcome.len(), 2); + // Refund receipt may not be ready yet + assert!([1, 2].contains(&transaction_result.receipts_outcome.len())); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); diff --git a/integration-tests/src/user/runtime_user.rs b/integration-tests/src/user/runtime_user.rs index 316a759caa0..c9d43a68ad3 100644 --- a/integration-tests/src/user/runtime_user.rs +++ b/integration-tests/src/user/runtime_user.rs @@ -194,6 +194,7 @@ impl RuntimeUser { transactions } + // TODO(#10942) get rid of copy-pasted code, it's outdated compared to the original fn get_final_transaction_result(&self, hash: &CryptoHash) -> FinalExecutionOutcomeView { let mut outcomes = self.get_recursive_transaction_results(hash); let mut looking_for_id = *hash; diff --git a/nearcore/Cargo.toml b/nearcore/Cargo.toml index ef98151b9bf..cdb5d9f8e25 100644 --- a/nearcore/Cargo.toml +++ b/nearcore/Cargo.toml @@ -128,7 +128,6 @@ yield_resume = [ "near-primitives/yield_resume", ] -serialize_all_state_changes = ["near-store/serialize_all_state_changes"] nightly = [ "near-actix-test-utils/nightly", "near-async/nightly", @@ -156,7 +155,6 @@ nightly = [ "protocol_feature_fix_contract_loading_cost", "protocol_feature_fix_staking_threshold", "protocol_feature_nonrefundable_transfer_nep491", - "serialize_all_state_changes", "testlib/nightly", "yield_resume", ] diff
--git a/nearcore/src/cold_storage.rs b/nearcore/src/cold_storage.rs index 9256e823474..9a36c9a8243 100644 --- a/nearcore/src/cold_storage.rs +++ b/nearcore/src/cold_storage.rs @@ -47,7 +47,7 @@ enum ColdStoreCopyResult { OtherBlockCopied, } -/// The ColdStoreError indicates what errors were encoutered while copying a blocks and running sanity checks. +/// The ColdStoreError indicates what errors were encountered while copying blocks and running sanity checks. #[derive(thiserror::Error, Debug)] pub enum ColdStoreError { #[error("Cold head is ahead of final head. cold head height: {cold_head_height} final head height {hot_final_head_height}")] @@ -215,7 +215,7 @@ fn cold_store_copy_result_to_string( } #[derive(Debug)] -enum ColdStoreInitialMigrationResult { +enum ColdStoreMigrationResult { /// Cold storage was already initialized NoNeedForMigration, /// Performed a successful cold storage migration @@ -224,15 +224,23 @@ enum ColdStoreInitialMigrationResult { MigrationInterrupted, } -/// This function performs initial population of cold storage if needed. +/// This function performs migration to cold storage if needed. /// Migration can be interrupted via `keep_going` flag. /// -/// First, checks that hot store is of kind `Archive`. If not, no migration needed. -/// Then, captures hot final head BEFORE the migration, as migration is performed during normal neard run. -/// If hot final head is not set, returns Err. -/// Otherwise: -/// 1. performed migration -/// 2. updates head to saved hot final head +/// Migration is performed if cold storage does not have a head set. +/// New head is determined based on the hot storage DBKind. +/// - If hot storage is of type `Archive`, we need to perform initial migration from a legacy archival node. +/// This process will take a long time. Cold head will be set to the hot final head captured BEFORE the migration started. +/// - If hot storage is of type `Hot`, this node was just created in split storage mode from genesis. +/// Genesis data is written to hot storage before the node can join the chain. Cold storage remains empty during that time. +/// Thus, when the cold loop is spawned, we need to perform migration of genesis data to cold storage. +/// Cold head will be set to genesis height. +/// - Other kinds of hot storage are indicative of a configuration error. +/// New cold head is written only after the migration is fully finished. +/// +/// After the cold head is determined, this function +/// 1. performs migration +/// 2. updates cold head /// /// Any Ok status means that this function should not be retried: /// - either migration was performed (now or earlier) @@ -240,49 +248,66 @@ enum ColdStoreInitialMigrationResult { /// which means that everything cold store thread related has to stop /// /// Error status means that for some reason migration cannot be performed. -fn cold_store_initial_migration( +fn cold_store_migration( split_storage_config: &SplitStorageConfig, keep_going: &Arc, + genesis_height: BlockHeight, hot_store: &Store, cold_store: &Store, cold_db: &Arc, -) -> anyhow::Result { - // We only need to perform the migration if hot store is of kind Archive and cold store doesn't have a head yet - if hot_store.get_db_kind()? != Some(near_store::metadata::DbKind::Archive) - || cold_store.get(DBCol::BlockMisc, HEAD_KEY)?.is_some() - { - return Ok(ColdStoreInitialMigrationResult::NoNeedForMigration); +) -> anyhow::Result { + // Migration is only needed if cold storage is not properly initialized, + // i.e. if cold head is not set.
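// Editor's sketch (illustrative, not part of this patch): the head selection
// implemented just below can be read as a small decision table over the hot
// store's DbKind. Names mirror the surrounding function; this is a simplified
// restatement under those assumptions, not the exact implementation:
//
//     fn choose_new_cold_height(
//         kind: Option<near_store::metadata::DbKind>,
//         genesis_height: BlockHeight,
//         hot_final_height: BlockHeight,
//     ) -> anyhow::Result<BlockHeight> {
//         use near_store::metadata::DbKind;
//         match kind {
//             // fresh split-storage node: cold store starts from genesis
//             Some(DbKind::Hot) => Ok(genesis_height),
//             // legacy archival node: cold head set to the hot final head
//             Some(DbKind::Archive) => Ok(hot_final_height),
//             // anything else (or a missing kind) is a configuration error
//             other => Err(anyhow::anyhow!("unsupported hot store DbKind: {other:?}")),
//         }
//     }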
+ if cold_store.get(DBCol::BlockMisc, HEAD_KEY)?.is_some() { + return Ok(ColdStoreMigrationResult::NoNeedForMigration); } - tracing::info!(target: "cold_store", "Starting initial population of cold store"); + tracing::info!(target: "cold_store", "Starting population of cold store."); + let new_cold_height = match hot_store.get_db_kind()? { + None => { + tracing::error!(target: "cold_store", "Hot store DBKind not set."); + return Err(anyhow::anyhow!("Hot store DBKind is not set")); + } + Some(near_store::metadata::DbKind::Hot) => { + tracing::info!(target: "cold_store", "Hot store DBKind is Hot."); + genesis_height + } + Some(near_store::metadata::DbKind::Archive) => { + tracing::info!(target: "cold_store", "Hot store DBKind is Archive."); + hot_store + .get_ser::(DBCol::BlockMisc, FINAL_HEAD_KEY)? + .ok_or_else(|| anyhow::anyhow!("FINAL_HEAD not found in hot storage"))? + .height + } + Some(kind) => { + tracing::error!(target: "cold_store", ?kind, "Hot store DbKind not supported."); + return Err(anyhow::anyhow!(format!("Hot store DBKind not {kind:?}."))); + } + }; - // If FINAL_HEAD is not set for hot storage something isn't right and we will probably fail in `update_cold_head`. - // Let's fail early. - let hot_final_head = hot_store - .get_ser::(DBCol::BlockMisc, FINAL_HEAD_KEY)? - .ok_or_else(|| anyhow::anyhow!("FINAL_HEAD not found in hot storage"))?; - let hot_final_head_height = hot_final_head.height; + tracing::info!(target: "cold_store", new_cold_height, "Determined cold storage head height after migration"); let batch_size = split_storage_config.cold_store_initial_migration_batch_size; match copy_all_data_to_cold(cold_db.clone(), hot_store, batch_size, keep_going)? { CopyAllDataToColdStatus::EverythingCopied => { - tracing::info!(target: "cold_store", "Initial population was successful, writing cold head of height {}", hot_final_head_height); - update_cold_head(cold_db, hot_store, &hot_final_head_height)?; - Ok(ColdStoreInitialMigrationResult::SuccessfulMigration) + tracing::info!(target: "cold_store", new_cold_height, "Cold storage population was successful, writing cold head."); + update_cold_head(cold_db, hot_store, &new_cold_height)?; + Ok(ColdStoreMigrationResult::SuccessfulMigration) } CopyAllDataToColdStatus::Interrupted => { - tracing::info!(target: "cold_store", "Initial population was interrupted"); - Ok(ColdStoreInitialMigrationResult::MigrationInterrupted) + tracing::info!(target: "cold_store", "Cold storage population was interrupted"); + Ok(ColdStoreMigrationResult::MigrationInterrupted) } } } -/// Runs a loop that tries to copy all data from hot store to cold (do initial migration). +/// Runs a loop that tries to copy all data from hot store to cold (do migration). /// If migration fails sleeps for 30s and tries again. /// If migration returned any successful status (including interruption status) breaks the loop. -fn cold_store_initial_migration_loop( +fn cold_store_migration_loop( split_storage_config: &SplitStorageConfig, keep_going: &Arc, + genesis_height: BlockHeight, hot_store: &Store, cold_store: &Store, cold_db: Arc, @@ -293,9 +318,10 @@ fn cold_store_initial_migration_loop( tracing::debug!(target: "cold_store", "stopping the initial migration loop"); break; } - match cold_store_initial_migration( + match cold_store_migration( split_storage_config, keep_going, + genesis_height, hot_store, cold_store, &cold_db, @@ -303,13 +329,18 @@ fn cold_store_initial_migration_loop( // We can either stop the cold store thread or hope that next time migration will not fail. 
// Here we pick the second option. Err(err) => { - let dur = split_storage_config.cold_store_initial_migration_loop_sleep_duration; - tracing::error!(target: "cold_store", "initial migration failed with error {}, sleeping {}s and trying again", err, dur.whole_seconds()); - std::thread::sleep(dur.unsigned_abs()); + let sleep_duration = split_storage_config + .cold_store_initial_migration_loop_sleep_duration + .unsigned_abs(); + let sleep_duration_in_secs = split_storage_config + .cold_store_initial_migration_loop_sleep_duration + .whole_seconds(); + tracing::error!(target: "cold_store", ?err, ?sleep_duration_in_secs, "Migration failed. Sleeping and trying again.", ); + std::thread::sleep(sleep_duration); } // Any Ok status from `cold_store_initial_migration` function means that we can proceed to regular run. - Ok(status) => { - tracing::info!(target: "cold_store", "Initial migration status: {:?}. Moving on.", status); + Ok(migration_status) => { + tracing::info!(target: "cold_store", ?migration_status, "Moving on."); break; } } @@ -428,9 +459,10 @@ pub fn spawn_cold_store_loop( tracing::info!(target : "cold_store", "Spawning the cold store loop"); let join_handle = std::thread::Builder::new().name("cold_store_copy".to_string()).spawn(move || { - cold_store_initial_migration_loop( + cold_store_migration_loop( &split_storage_config, &keep_going_clone, + genesis_height, &hot_store, &cold_store, cold_db.clone(), diff --git a/nearcore/src/config_validate.rs b/nearcore/src/config_validate.rs index d73ee593d59..3db950d9607 100644 --- a/nearcore/src/config_validate.rs +++ b/nearcore/src/config_validate.rs @@ -32,7 +32,7 @@ impl<'a> ConfigValidator<'a> { /// this function would check all conditions, and add all error messages to ConfigValidator.errors fn validate_all_conditions(&mut self) { if !self.config.archive && self.config.save_trie_changes == Some(false) { - let error_message = "Configuration with archive = false and save_trie_changes = false is not supported because non-archival nodes must save trie changes in order to do do garbage collection.".to_string(); + let error_message = "Configuration with archive = false and save_trie_changes = false is not supported because non-archival nodes must save trie changes in order to do garbage collection.".to_string(); self.validation_errors.push_config_semantics_error(error_message); } @@ -198,7 +198,7 @@ mod tests { #[test] #[should_panic( - expected = "\\nconfig.json semantic issue: Configuration with archive = false and save_trie_changes = false is not supported because non-archival nodes must save trie changes in order to do do garbage collection.\\nconfig.json semantic issue: gc config values should all be greater than 0" + expected = "\\nconfig.json semantic issue: Configuration with archive = false and save_trie_changes = false is not supported because non-archival nodes must save trie changes in order to do garbage collection.\\nconfig.json semantic issue: gc config values should all be greater than 0" )] fn test_multiple_config_validation_errors() { let mut config = Config::default(); diff --git a/nearcore/src/lib.rs b/nearcore/src/lib.rs index 8d3c9c4da94..87ab3f7d9e6 100644 --- a/nearcore/src/lib.rs +++ b/nearcore/src/lib.rs @@ -334,13 +334,10 @@ pub fn start_with_config_and_synchronization( adv.clone(), ); - let state_snapshot_actor = Arc::new( - StateSnapshotActor::new( - runtime.get_flat_storage_manager(), - network_adapter.as_multi_sender(), - runtime.get_tries(), - ) - .start(), + let (state_snapshot_actor, state_snapshot_arbiter) = 
StateSnapshotActor::spawn( + runtime.get_flat_storage_manager(), + network_adapter.as_multi_sender(), + runtime.get_tries(), ); let delete_snapshot_callback = get_delete_snapshot_callback(state_snapshot_actor.clone()); let make_snapshot_callback = @@ -447,8 +444,12 @@ pub fn start_with_config_and_synchronization( tracing::trace!(target: "diagnostic", key = "log", "Starting NEAR node with diagnostic activated"); - let mut arbiters = - vec![client_arbiter_handle, shards_manager_arbiter_handle, trie_metrics_arbiter]; + let mut arbiters = vec![ + client_arbiter_handle, + shards_manager_arbiter_handle, + trie_metrics_arbiter, + state_snapshot_arbiter, + ]; if let Some(db_metrics_arbiter) = db_metrics_arbiter { arbiters.push(db_metrics_arbiter); } diff --git a/neard/Cargo.toml b/neard/Cargo.toml index 4834cc11368..4f6d0fc7db1 100644 --- a/neard/Cargo.toml +++ b/neard/Cargo.toml @@ -74,7 +74,6 @@ rosetta_rpc = ["nearcore/rosetta_rpc"] json_rpc = ["nearcore/json_rpc"] protocol_feature_fix_staking_threshold = ["nearcore/protocol_feature_fix_staking_threshold"] protocol_feature_nonrefundable_transfer_nep491 = ["near-state-viewer/protocol_feature_nonrefundable_transfer_nep491"] -serialize_all_state_changes = ["nearcore/serialize_all_state_changes"] new_epoch_sync = ["nearcore/new_epoch_sync", "dep:near-epoch-sync-tool"] yield_resume = ["nearcore/yield_resume"] @@ -98,7 +97,6 @@ nightly = [ "nightly_protocol", "protocol_feature_fix_staking_threshold", "protocol_feature_nonrefundable_transfer_nep491", - "serialize_all_state_changes", "yield_resume", ] nightly_protocol = [ diff --git a/neard/src/cli.rs b/neard/src/cli.rs index 5b5bda3ce7c..d3cfb8c1281 100644 --- a/neard/src/cli.rs +++ b/neard/src/cli.rs @@ -593,8 +593,7 @@ impl RunCmd { .await; actix::System::current().stop(); // Disable the subscriber to properly shutdown the tracer. - near_o11y::reload(Some("error"), None, Some(near_o11y::OpenTelemetryLevel::OFF)) - .unwrap(); + near_o11y::reload(Some("error"), None, Some("off")).unwrap(); }); sys.run().unwrap(); info!(target: "neard", "Waiting for RocksDB to gracefully shutdown"); diff --git a/nightly/README.md b/nightly/README.md index d35953a6bfc..f41b267a8d2 100644 --- a/nightly/README.md +++ b/nightly/README.md @@ -5,7 +5,7 @@ request a run of the tests. Most notably, `nightly.txt` file contains all the tests that NayDuck runs once a day on the head of the master branch of the repository. -Nightly build results are available on [NayDuck](https://nayduck.near.org/). +Nightly build results are available on [NayDuck](https://nayduck.nearone.org/). 
## List file format diff --git a/pytest/tests/contracts/deploy_call_smart_contract.py b/pytest/tests/contracts/deploy_call_smart_contract.py index 3cb33820311..4c745a7610a 100755 --- a/pytest/tests/contracts/deploy_call_smart_contract.py +++ b/pytest/tests/contracts/deploy_call_smart_contract.py @@ -11,6 +11,8 @@ from transaction import sign_deploy_contract_tx, sign_function_call_tx from utils import load_test_contract +GGAS = 10**9 + def test_deploy_contract(): nodes = start_cluster( @@ -26,8 +28,7 @@ def test_deploy_contract(): last_block_hash = nodes[1].get_latest_block().hash_bytes tx = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, 'log_something', - [], 100000000000, 100000000000, 20, - last_block_hash) + [], 150 * GGAS, 1, 20, last_block_hash) res = nodes[1].send_tx_and_wait(tx, 20) import json print(json.dumps(res, indent=2)) diff --git a/pytest/tests/loadtest/locust/download_contracts.sh b/pytest/tests/loadtest/locust/download_contracts.sh index f2438ef3dc0..2bcf5dc3b15 100755 --- a/pytest/tests/loadtest/locust/download_contracts.sh +++ b/pytest/tests/loadtest/locust/download_contracts.sh @@ -1,9 +1,18 @@ #!/bin/bash # -# Downloads the WASM contracts necessary for all workloads and stores them in "res" folder. +# Retrieves the WASM contracts from respective URLs or directories for all Locust workloads +# and stores them in the "res" folder. -cd res -wget https://raw.githubusercontent.com/NearSocial/social-db/master/res/social_db_release.wasm -O social_db.wasm -wget https://raw.githubusercontent.com/sweatco/sweat-near/main/res/sweat.wasm -O sweat.wasm -ln -s ../../../../../runtime/near-test-contracts/res/fungible_token.wasm fungible_token.wasm -ln -s ../../../../../runtime/near-test-contracts/res/backwards_compatible_rs_contract.wasm congestion.wasm +SCRIPT_DIR="${0%/*}" + +# Directory to place the wasm files in. +TARGET_CONTRACTS_DIR="${SCRIPT_DIR}/res" + +# Directory where some of the contracts are located. +# TODO: Consider storing the contracts in a single place. +SOURCE_CONTRACTS_DIR="${SCRIPT_DIR}/../../../../runtime/near-test-contracts/res" + +wget https://raw.githubusercontent.com/NearSocial/social-db/master/res/social_db_release.wasm -O ${TARGET_CONTRACTS_DIR}/social_db.wasm +wget https://raw.githubusercontent.com/sweatco/sweat-near/main/res/sweat.wasm -O ${TARGET_CONTRACTS_DIR}/sweat.wasm +ln -s ${SOURCE_CONTRACTS_DIR}/fungible_token.wasm ${TARGET_CONTRACTS_DIR}/fungible_token.wasm +ln -s ${SOURCE_CONTRACTS_DIR}/backwards_compatible_rs_contract.wasm ${TARGET_CONTRACTS_DIR}/congestion.wasm diff --git a/pytest/tests/loadtest/locust/res/social_db.wasm b/pytest/tests/loadtest/locust/res/social_db.wasm index 37fcb1638b2..323ff16a8a5 100644 Binary files a/pytest/tests/loadtest/locust/res/social_db.wasm and b/pytest/tests/loadtest/locust/res/social_db.wasm differ diff --git a/pytest/tests/loadtest/locust/res/sweat.wasm b/pytest/tests/loadtest/locust/res/sweat.wasm index 948c1a725ff..81f48047bcf 100644 Binary files a/pytest/tests/loadtest/locust/res/sweat.wasm and b/pytest/tests/loadtest/locust/res/sweat.wasm differ diff --git a/pytest/tests/mocknet/helpers/neard_runner.py b/pytest/tests/mocknet/helpers/neard_runner.py index 4e1b7cc67cd..9a427016bb9 100644 --- a/pytest/tests/mocknet/helpers/neard_runner.py +++ b/pytest/tests/mocknet/helpers/neard_runner.py @@ -2,6 +2,7 @@ # python script to handle neard process management. 
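# Editor's sketch (illustrative, not part of this patch): neard_runner serves a
# JSON-RPC 2.0 API over HTTP, and the dispatcher below registers methods such as
# `ready`, `version`, `start`, `stop`, `reset`, `make_backup` and `ls_backups`.
# A minimal standalone client under those assumptions; the helper name
# `call_runner` is hypothetical, and port 3000 matches the local-mocknet default
# used later in this PR:

import requests


def call_runner(host, port, method, params=None):
    # JSON-RPC 2.0 envelope; note the server now always answers with HTTP 200
    # and reports errors inside the JSON-RPC response body instead.
    body = {
        'method': method,
        'params': params if params is not None else [],
        'id': 'dontcare',
        'jsonrpc': '2.0',
    }
    r = requests.post(f'http://{host}:{port}', json=body, timeout=5)
    r.raise_for_status()
    return r.json()


# Example: list the backups known to a locally running neard_runner.
# print(call_runner('0.0.0.0', 3000, 'ls_backups'))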
import argparse +import datetime from enum import Enum import fcntl import json @@ -9,6 +10,7 @@ import logging import os import psutil +import re import requests import shutil import signal @@ -32,19 +34,6 @@ def get_lock(home): return fd -def http_code(jsonrpc_error): - if jsonrpc_error is None: - return http.HTTPStatus.OK - - if jsonrpc_error['code'] == -32700 or jsonrpc_error[ - 'code'] == -32600 or jsonrpc_error['code'] == -32602: - return http.HTTPStatus.BAD_REQUEST - elif jsonrpc_error['code'] == -32601: - return http.HTTPStatus.NOT_FOUND - else: - return http.HTTPStatus.INTERNAL_SERVER_ERROR - - class JSONHandler(http.server.BaseHTTPRequestHandler): def __init__(self, request, client_address, server): @@ -56,11 +45,17 @@ def __init__(self, request, client_address, server): self.dispatcher.add_method(server.neard_runner.do_update_config, name="update_config") self.dispatcher.add_method(server.neard_runner.do_ready, name="ready") + self.dispatcher.add_method(server.neard_runner.do_version, + name="version") self.dispatcher.add_method(server.neard_runner.do_start, name="start") self.dispatcher.add_method(server.neard_runner.do_stop, name="stop") self.dispatcher.add_method(server.neard_runner.do_reset, name="reset") self.dispatcher.add_method(server.neard_runner.do_update_binaries, name="update_binaries") + self.dispatcher.add_method(server.neard_runner.do_make_backup, + name="make_backup") + self.dispatcher.add_method(server.neard_runner.do_ls_backups, + name="ls_backups") super().__init__(request, client_address, server) def do_GET(self): @@ -85,7 +80,7 @@ def do_POST(self): response = jsonrpc.JSONRPCResponseManager.handle(body, self.dispatcher) response_body = response.json.encode('UTF-8') - self.send_response(http_code(response.error)) + self.send_response(http.HTTPStatus.OK) self.send_header("Content-Type", 'application/json') self.send_header("Content-Length", str(len(response_body))) self.end_headers() @@ -108,6 +103,11 @@ class TestState(Enum): STOPPED = 6 RESETTING = 7 ERROR = 8 + MAKING_BACKUP = 9 + SET_VALIDATORS = 10 + + +backup_id_pattern = re.compile(r'^[0-9a-zA-Z.][0-9a-zA-Z_\-.]+$') class NeardRunner: @@ -140,13 +140,32 @@ def __init__(self, args): 'neard_process': None, 'current_neard_path': None, 'state': TestState.NONE.value, + 'backups': {}, + 'state_data': None, } + self.legacy_records = self.is_legacy() # protects self.data, and its representation on disk, # because both the rpc server and the main loop touch them concurrently # TODO: consider locking the TestState variable separately, since there # is no need to block reading that when inside the update_binaries rpc for example self.lock = threading.Lock() + def is_legacy(self): + if os.path.exists(os.path.join(self.neard_home, 'setup', 'data')): + if os.path.exists( + os.path.join(self.neard_home, 'setup', 'records.json')): + logging.warning( + f'found both records.json and data/ in {os.path.join(self.neard_home, "setup")}' + ) + return False + if os.path.exists(os.path.join( + self.neard_home, 'setup', 'records.json')) and os.path.exists( + os.path.join(self.neard_home, 'setup', 'genesis.json')): + return True + sys.exit( + f'did not find either records.json and genesis.json or data/ in {os.path.join(self.neard_home, "setup")}' + ) + def is_traffic_generator(self): return self.config.get('is_traffic_generator', False) @@ -190,9 +209,11 @@ def parse_binaries_config(self): }) return binaries + def set_current_neard_path(self, path): + self.data['current_neard_path'] = path + def reset_current_neard_path(self): - 
self.data['current_neard_path'] = self.data['binaries'][0][ - 'system_path'] + self.set_current_neard_path(self.data['binaries'][0]['system_path']) # tries to download the binaries specified in config.json, saving them in $home/binaries/ # if force is set to true all binaries will be downloaded, otherwise only the missing ones @@ -205,12 +226,12 @@ def download_binaries(self, force): pass if force: - # always start from 0 and download all binaries - start_index = 0 - else: - # start at the index of the first missing binary - # typically it's all or nothing - start_index = len(self.data['binaries']) + # always start from start_index = 0 and download all binaries + self.data['binaries'] = [] + + # start at the index of the first missing binary + # typically it's all or nothing + start_index = len(self.data['binaries']) # for now we assume that the binaries recorded in data.json as having been # dowloaded are still valid and were not touched. Also this assumes that their @@ -243,7 +264,7 @@ def tmp_near_home_path(self, *args): args = ('tmp-near-home',) + args return os.path.join(self.home, *args) - def neard_init(self): + def neard_init(self, rpc_port, protocol_port, validator_id): # We make neard init save files to self.tmp_near_home_path() just to make it # a bit cleaner, so we can init to a non-existent directory and then move # the files we want to the real near home without having to remove it first @@ -252,22 +273,54 @@ def neard_init(self): self.tmp_near_home_path(), 'init' ] if not self.is_traffic_generator(): - cmd += ['--account-id', f'{socket.gethostname()}.near'] + if validator_id is None: + validator_id = f'{socket.gethostname()}.near' + cmd += ['--account-id', validator_id] + else: + if validator_id is not None: + logging.warning( + f'ignoring validator ID "{validator_id}" for traffic generator node' + ) subprocess.check_call(cmd) with open(self.tmp_near_home_path('config.json'), 'r') as f: config = json.load(f) + config['rpc']['addr'] = f'0.0.0.0:{rpc_port}' + config['network']['addr'] = f'0.0.0.0:{protocol_port}' self.data['neard_addr'] = config['rpc']['addr'] config['tracked_shards'] = [0, 1, 2, 3] config['log_summary_style'] = 'plain' config['network']['skip_sync_wait'] = False - config['genesis_records_file'] = 'records.json' + if self.legacy_records: + config['genesis_records_file'] = 'records.json' config['rpc']['enable_debug_rpc'] = True + config['consensus']['min_block_production_delay']['secs'] = 1 + config['consensus']['min_block_production_delay']['nanos'] = 300000000 + config['consensus']['max_block_production_delay']['secs'] = 3 + config['consensus']['max_block_production_delay']['nanos'] = 0 if self.is_traffic_generator(): config['archive'] = True with open(self.tmp_near_home_path('config.json'), 'w') as f: json.dump(config, f, indent=2) + def reset_starting_data_dir(self): + try: + shutil.rmtree(self.target_near_home_path('data')) + except FileNotFoundError: + pass + if not self.legacy_records: + cmd = [ + self.data['binaries'][0]['system_path'], + '--home', + os.path.join(self.neard_home, 'setup'), + 'database', + 'make-snapshot', + '--destination', + self.target_near_home_path(), + ] + logging.info(f'running {" ".join(cmd)}') + subprocess.check_call(cmd) + def move_init_files(self): try: os.mkdir(self.target_near_home_path()) @@ -277,16 +330,18 @@ def move_init_files(self): filename = self.target_near_home_path(p) if os.path.isfile(filename): os.remove(filename) - try: - shutil.rmtree(self.target_near_home_path('data')) - except FileNotFoundError: - pass + 
self.reset_starting_data_dir() + paths = ['config.json', 'node_key.json'] if not self.is_traffic_generator(): paths.append('validator_key.json') for path in paths: shutil.move(self.tmp_near_home_path(path), self.target_near_home_path(path)) + if not self.legacy_records: + shutil.copyfile( + os.path.join(self.neard_home, 'setup', 'genesis.json'), + self.target_near_home_path('genesis.json')) # This RPC method tells to stop neard and re-initialize its home dir. This returns the # validator and node key that resulted from the initialization. We can't yet call amend-genesis @@ -296,7 +351,20 @@ def move_init_files(self): # TODO: add a binaries argument that tells what binaries we want to use in the test. Before we do # this, it is pretty mandatory to implement some sort of client authentication, because without it, # anyone would be able to get us to download and run arbitrary code - def do_new_test(self): + def do_new_test(self, + rpc_port=3030, + protocol_port=24567, + validator_id=None): + if not isinstance(rpc_port, int): + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, message='rpc_port argument not an int') + if not isinstance(protocol_port, int): + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, message='protocol_port argument not an int') + if validator_id is not None and not isinstance(validator_id, str): + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, message='validator_id argument not a string') + with self.lock: self.kill_neard() try: @@ -312,7 +380,7 @@ def do_new_test(self): except FileNotFoundError: pass - self.neard_init() + self.neard_init(rpc_port, protocol_port, validator_id) self.move_init_files() with open(self.target_near_home_path('config.json'), 'r') as f: @@ -329,6 +397,7 @@ def do_new_test(self): validator_account_id = None validator_public_key = None + self.data['backups'] = {} self.set_state(TestState.AWAITING_NETWORK_INIT) self.save_data() @@ -347,7 +416,8 @@ def do_network_init(self, boot_nodes, epoch_length=1000, num_seats=100, - protocol_version=None): + protocol_version=None, + genesis_time=None): if not isinstance(validators, list): raise jsonrpc.exceptions.JSONRPCDispatchException( code=-32600, message='validators argument not a list') @@ -363,6 +433,13 @@ def do_network_init(self, raise jsonrpc.exceptions.JSONRPCDispatchException( code=-32600, message='boot_nodes argument must not be empty') + if not self.legacy_records and genesis_time is None: + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, + message= + 'genesis_time argument required for nodes running via neard fork-network' + ) + with self.lock: state = self.get_state() if state != TestState.AWAITING_NETWORK_INIT: @@ -385,6 +462,7 @@ def do_network_init(self, 'epoch_length': epoch_length, 'num_seats': num_seats, 'protocol_version': protocol_version, + 'genesis_time': genesis_time, }, f) def do_update_config(self, key_value): @@ -435,25 +513,54 @@ def do_stop(self): self.set_state(TestState.STOPPED) self.save_data() - def do_reset(self): + def do_reset(self, backup_id=None): with self.lock: state = self.get_state() logging.info(f"do_reset {state}") + if state != TestState.RUNNING and state != TestState.STOPPED: + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, + message='Cannot reset data dir as test state is not ready') + + backups = self.data.get('backups', {}) + if backup_id is not None and backup_id != 'start' and backup_id not in backups: + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, 
message=f'backup ID {backup_id} not known') + + if backup_id is None or backup_id == 'start': + path = self.data['binaries'][0]['system_path'] + else: + path = backups[backup_id]['neard_path'] + if state == TestState.RUNNING: self.kill_neard() - self.set_state(TestState.RESETTING) - self.reset_current_neard_path() - self.save_data() - elif state == TestState.STOPPED: - self.set_state(TestState.RESETTING) - self.reset_current_neard_path() - self.save_data() - else: + self.set_state(TestState.RESETTING, data=backup_id) + self.set_current_neard_path(path) + self.save_data() + + def do_make_backup(self, backup_id, description=None): + with self.lock: + state = self.get_state() + if state != TestState.RUNNING and state != TestState.STOPPED: raise jsonrpc.exceptions.JSONRPCDispatchException( code=-32600, - message= - 'Cannot reset node as test state has not been initialized yet' - ) + message='Cannot make backup as test state is not ready') + + if backup_id_pattern.match(backup_id) is None: + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, message=f'invalid backup ID: {backup_id}') + + if backup_id in self.data.get('backups', {}): + raise jsonrpc.exceptions.JSONRPCDispatchException( + code=-32600, message=f'backup {backup_id} already exists') + if state == TestState.RUNNING: + self.kill_neard() + self.making_backup(backup_id, description) + self.save_data() + + def do_ls_backups(self): + with self.lock: + return self.data.get('backups', {}) def do_update_binaries(self): with self.lock: @@ -468,6 +575,13 @@ def do_update_binaries(self): self.save_data() logging.info('update binaries finished') + def do_version(self): + if self.legacy_records: + node_setup_version = '0' + else: + node_setup_version = '1' + return {'node_setup_version': node_setup_version} + def do_ready(self): with self.lock: state = self.get_state() @@ -664,14 +778,19 @@ def check_upgrade_neard(self): start_neard = True if start_neard: - self.data['current_neard_path'] = neard_path + self.set_current_neard_path(neard_path) self.start_neard() def get_state(self): return TestState(self.data['state']) - def set_state(self, state): + def set_state(self, state, data=None): self.data['state'] = state.value + self.data['state_data'] = data + + def making_backup(self, backup_id, description=None): + backup_data = {'backup_id': backup_id, 'description': description} + self.set_state(TestState.MAKING_BACKUP, data=backup_data) def network_init(self): # wait til we get a network_init RPC @@ -693,36 +812,82 @@ def network_init(self): with open(self.target_near_home_path('config.json'), 'w') as f: config = json.dump(config, f, indent=2) - cmd = [ - self.data['binaries'][0]['system_path'], - 'amend-genesis', - '--genesis-file-in', - os.path.join(self.neard_home, 'setup', 'genesis.json'), - '--records-file-in', - os.path.join(self.neard_home, 'setup', 'records.json'), - '--genesis-file-out', - self.target_near_home_path('genesis.json'), - '--records-file-out', - self.target_near_home_path('records.json'), - '--validators', - self.home_path('validators.json'), - '--chain-id', - 'mocknet', - '--transaction-validity-period', - '10000', - '--epoch-length', - str(n['epoch_length']), - '--num-seats', - str(n['num_seats']), - ] - if n['protocol_version'] is not None: - cmd.append('--protocol-version') - cmd.append(str(n['protocol_version'])) - - self.run_neard(cmd) - self.set_state(TestState.AMEND_GENESIS) + if self.legacy_records: + cmd = [ + self.data['binaries'][0]['system_path'], + 'amend-genesis', + '--genesis-file-in', + 
os.path.join(self.neard_home, 'setup', 'genesis.json'), + '--records-file-in', + os.path.join(self.neard_home, 'setup', 'records.json'), + '--genesis-file-out', + self.target_near_home_path('genesis.json'), + '--records-file-out', + self.target_near_home_path('records.json'), + '--validators', + self.home_path('validators.json'), + '--chain-id', + 'mocknet', + '--transaction-validity-period', + '10000', + '--epoch-length', + str(n['epoch_length']), + '--num-seats', + str(n['num_seats']), + ] + if n['protocol_version'] is not None: + cmd.append('--protocol-version') + cmd.append(str(n['protocol_version'])) + + self.run_neard(cmd) + self.set_state(TestState.AMEND_GENESIS) + else: + cmd = [ + self.data['binaries'][0]['system_path'], '--home', + self.target_near_home_path(), 'fork-network', 'set-validators', + '--validators', + self.home_path('validators.json'), '--chain-id-suffix', + '_mocknet', '--epoch-length', + str(n['epoch_length']), '--genesis-time', + str(n['genesis_time']) + ] + + self.run_neard(cmd) + self.set_state(TestState.SET_VALIDATORS) self.save_data() + def check_set_validators(self): + path, running, exit_code = self.poll_neard() + if path is None: + logging.error( + 'state is SET_VALIDATORS, but no amend-genesis process is known' + ) + self.set_state(TestState.AWAITING_NETWORK_INIT) + self.save_data() + elif not running: + if exit_code is not None and exit_code != 0: + logging.error( + f'neard fork-network set-validators exited with code {exit_code}' + ) + # for now just set the state to ERROR, and if this ever happens, the + # test operator will have to intervene manually. Probably shouldn't + # really happen in practice + self.set_state(TestState.ERROR) + self.save_data() + else: + cmd = [ + self.data['binaries'][0]['system_path'], + '--home', + self.target_near_home_path(), + 'fork-network', + 'finalize', + ] + logging.info(f'running {" ".join(cmd)}') + subprocess.check_call(cmd) + logging.info( + f'neard fork-network finalize succeeded. Node is ready') + self.make_initial_backup() + def check_amend_genesis(self): path, running, exit_code = self.poll_neard() if path is None: @@ -796,6 +961,53 @@ def check_amend_genesis(self): self.set_state(TestState.STATE_ROOTS) self.save_data() + def make_backup(self): + now = str(datetime.datetime.now()) + backup_data = self.data['state_data'] + name = backup_data['backup_id'] + description = backup_data.get('description', None) + + backup_dir = self.home_path('backups', name) + if os.path.exists(backup_dir): + # we already checked that this backup ID didn't already exist, so if this path + # exists, someone probably manually added it. 
for now just set the state to ERROR + # and make the human intervene, but it shouldn't happen in practice + logging.warn(f'{backup_dir} already exists') + self.set_state(TestState.ERROR) + return + logging.info(f'copying data dir to {backup_dir}') + shutil.copytree(self.target_near_home_path('data'), + backup_dir, + dirs_exist_ok=True) + logging.info(f'copied data dir to {backup_dir}') + + backups = self.data.get('backups', {}) + if name in backups: + # shouldn't happen if we check this in do_make_backups(), but fine to be paranoid and at least warn here + logging.warn( + f'backup {name} already existed in data.json, but it was not present before' + ) + backups[name] = { + 'time': now, + 'description': description, + 'neard_path': self.data['current_neard_path'] + } + self.data['backups'] = backups + self.set_state(TestState.STOPPED) + self.save_data() + + def make_initial_backup(self): + try: + shutil.rmtree(self.home_path('backups')) + except FileNotFoundError: + pass + os.mkdir(self.home_path('backups')) + self.making_backup( + 'start', + description='initial test state after state root computation') + self.save_data() + self.make_backup() + def check_genesis_state(self): path, running, exit_code = self.poll_neard() if not running: @@ -808,35 +1020,29 @@ def check_genesis_state(self): try: r = requests.get(f'http://{self.data["neard_addr"]}/status', timeout=5) - if r.status_code == 200: - logging.info('neard finished computing state roots') - self.kill_neard() - - try: - shutil.rmtree(self.home_path('backups')) - except FileNotFoundError: - pass - os.mkdir(self.home_path('backups')) - # Right now we save the backup to backups/start and in the future - # it would be nice to support a feature that lets you stop all the nodes and - # make another backup to restore to - backup_dir = self.home_path('backups', 'start') - logging.info(f'copying data dir to {backup_dir}') - shutil.copytree(self.target_near_home_path('data'), backup_dir) - self.set_state(TestState.STOPPED) - self.save_data() except requests.exceptions.ConnectionError: - pass + return + if r.status_code == 200: + logging.info('neard finished computing state roots') + self.kill_neard() + self.make_initial_backup() def reset_near_home(self): + backup_id = self.data['state_data'] + if backup_id is None: + backup_id = 'start' + backup_path = self.home_path('backups', backup_id) + if not os.path.exists(backup_path): + logging.error(f'backup dir {backup_path} does not exist') + self.set_state(TestState.ERROR) + self.save_data() try: logging.info("removing the old directory") shutil.rmtree(self.target_near_home_path('data')) except FileNotFoundError: pass - logging.info('restoring data dir from backup') - shutil.copytree(self.home_path('backups', 'start'), - self.target_near_home_path('data')) + logging.info(f'restoring data dir from backup at {backup_path}') + shutil.copytree(backup_path, self.target_near_home_path('data')) logging.info('data dir restored') self.set_state(TestState.STOPPED) self.save_data() @@ -850,12 +1056,16 @@ def main_loop(self): self.network_init() elif state == TestState.AMEND_GENESIS: self.check_amend_genesis() + elif state == TestState.SET_VALIDATORS: + self.check_set_validators() elif state == TestState.STATE_ROOTS: self.check_genesis_state() elif state == TestState.RUNNING: self.check_upgrade_neard() elif state == TestState.RESETTING: self.reset_near_home() + elif state == TestState.MAKING_BACKUP: + self.make_backup() time.sleep(10) def serve(self, port): diff --git a/pytest/tests/mocknet/local_test_node.py 
b/pytest/tests/mocknet/local_test_node.py new file mode 100644 index 00000000000..7fff43019b9 --- /dev/null +++ b/pytest/tests/mocknet/local_test_node.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python3 +""" +defines the LocalTestNeardRunner class meant to test mocknet itself locally +""" +from argparse import ArgumentParser +import http.server +import json +import os +import pathlib +import psutil +import re +import requests +import shutil +import signal +import subprocess +import sys +import threading +import time + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) + +from configured_logger import logger +from node_handle import NodeHandle + + +# return the process with pid listed in `pid_path`, if it's running +def get_process(pid_path): + try: + with open(pid_path, 'r') as f: + pid = int(f.read().strip()) + except FileNotFoundError: + return None + try: + return psutil.Process(pid) + except psutil.NoSuchProcess: + return None + + +# kill the process with pid listed in `pid_path` +def kill_process(pid_path): + p = get_process(pid_path) + if p is not None: + logger.info(f'killing process with pid {p.pid} indicated in {pid_path}') + p.send_signal(signal.SIGTERM) + p.wait() + try: + pid_path.unlink() + except FileNotFoundError: + pass + + +def http_post(addr, port, body): + r = requests.post(f'http://{addr}:{port}', json=body, timeout=5) + if r.status_code != 200: + logger.warning( + f'bad response {r.status_code} trying to post {body} to http://{addr}:{port}:\n{r.content}' + ) + r.raise_for_status() + return r.json() + + +class LocalTestNeardRunner: + + def __init__(self, home, port, neard_rpc_port, neard_protocol_port): + # last part of the path. e.g. ~/.near/local-mocknet/traffic-generator -> traffic-generator + self._name = os.path.basename(os.path.normpath(home)) + self.home = home + self.port = port + self.neard_rpc_port = neard_rpc_port + self.neard_protocol_port = neard_protocol_port + + def name(self): + return self._name + + def ip_addr(self): + return '0.0.0.0' + + def neard_port(self): + return self.neard_rpc_port + + def init(self): + return + + def mk_neard_runner_home(self, remove_home_dir): + # handled by local_test_setup_cmd() + return + + def upload_neard_runner(self): + return + + def upload_neard_runner_config(self, config): + # handled by local_test_setup_cmd() + return + + def init_python(self): + return + + def _pid_path(self): + return self.home / 'pid.txt' + + def stop_neard_runner(self): + kill_process(self._pid_path()) + + def start_neard_runner(self): + if get_process(self._pid_path()) is not None: + return + + with open(self.home / 'stdout', 'ab') as stdout, \ + open(self.home / 'stderr', 'ab') as stderr: + args = [ + sys.executable, 'tests/mocknet/helpers/neard_runner.py', + '--home', self.home / 'neard-runner', '--neard-home', + self.home / '.near', '--neard-logs', self.home / 'neard-logs', + '--port', + str(self.port) + ] + process = subprocess.Popen(args, + stdin=subprocess.DEVNULL, + stdout=stdout, + stderr=stderr, + process_group=0) + with open(self._pid_path(), 'w') as f: + f.write(f'{process.pid}\n') + logger.info( + f'started neard runner process with pid {process.pid} listening on port {self.port}' + ) + + def neard_runner_post(self, body): + return http_post(self.ip_addr(), self.port, body) + + def new_test_params(self): + return { + 'rpc_port': self.neard_rpc_port, + 'protocol_port': self.neard_protocol_port, + 'validator_id': self._name, + } + + def get_validators(self): + body = { + 'method': 'validators', + 'params': [None],
+ 'id': 'dontcare', + 'jsonrpc': '2.0' + } + return http_post(self.ip_addr(), self.neard_rpc_port, body) + + +def prompt_flags(args): + if args.num_nodes is None: + print( + 'number of validating nodes? One instance of neard_runner.py will be run for each one, plus a traffic generator: ' + ) + args.num_nodes = int(sys.stdin.readline().strip()) + assert args.num_nodes > 0 + + if args.neard_binary_path is None: + print('neard binary path?: ') + args.neard_binary_path = sys.stdin.readline().strip() + assert len(args.neard_binary_path) > 0 + + if not args.legacy_records and args.fork_height is None: + print( + 'prepare nodes with fork-network tool instead of genesis records JSON? [yes/no]:' + ) + while True: + r = sys.stdin.readline().strip().lower() + if r == 'yes': + args.legacy_records = False + break + elif r == 'no': + args.legacy_records = True + break + else: + print('please say yes or no') + + if args.source_home_dir is None: + if args.legacy_records: + print('source home dir: ') + else: + print( + 'source home dir containing the HEAD block of target home, plus more blocks after that: ' + ) + args.source_home_dir = sys.stdin.readline().strip() + assert len(args.source_home_dir) > 0 + + if args.target_home_dir is None and not args.legacy_records: + print('target home dir whose HEAD is contained in --source-home-dir: ') + args.target_home_dir = sys.stdin.readline().strip() + assert len(args.target_home_dir) > 0 + + if args.legacy_records and args.fork_height is None: + print('fork height: ') + args.fork_height = sys.stdin.readline().strip() + assert len(args.fork_height) > 0 + + +def run_cmd(cmd): + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + sys.exit( + f'running `{" ".join([str(a) for a in cmd])}` returned {e.returncode}. 
output:\n{e.output.decode("utf-8")}' + ) + + +# dumps records from `traffic_home_dir` and prepares records with keys changed +# for mirroring traffic +def make_records(neard_binary_path, traffic_home_dir, start_height): + run_cmd([ + neard_binary_path, '--home', traffic_home_dir, 'view-state', + 'dump-state', '--stream', '--height', + str(start_height) + ]) + shutil.copyfile(traffic_home_dir / 'output/genesis.json', + traffic_home_dir / 'setup/genesis.json') + run_cmd([ + neard_binary_path, + 'mirror', + 'prepare', + '--records-file-in', + traffic_home_dir / 'output/records.json', + '--records-file-out', + traffic_home_dir / 'setup/records.json', + '--secret-file-out', + '/dev/null', + '--no-secret', + ]) + + +def make_legacy_records(neard_binary_path, traffic_generator_home, node_homes, + start_height): + make_records(neard_binary_path, traffic_generator_home / '.near', + start_height) + for node_home in node_homes: + shutil.copyfile(traffic_generator_home / '.near/setup/genesis.json', + node_home / '.near/setup/genesis.json') + shutil.copyfile(traffic_generator_home / '.near/setup/records.json', + node_home / '.near/setup/records.json') + + +def fork_db(neard_binary_path, target_home_dir, home_dir, setup_dir): + copy_source_home(target_home_dir, setup_dir) + + run_cmd([ + neard_binary_path, + '--home', + setup_dir, + 'fork-network', + 'init', + ]) + run_cmd([ + neard_binary_path, + '--home', + setup_dir, + 'fork-network', + 'amend-access-keys', + ]) + shutil.rmtree(setup_dir / 'data/fork-snapshot') + + +def make_forked_network(neard_binary_path, traffic_generator_home, node_homes, + source_home_dir, target_home_dir): + for (home_dir, setup_dir) in [ + (h / '.near', h / '.near/setup') for h in node_homes + ] + [(traffic_generator_home / '.near/target', + traffic_generator_home / '.near/setup')]: + fork_db(neard_binary_path, target_home_dir, home_dir, setup_dir) + + +def mkdirs(local_mocknet_path): + traffic_generator_home = local_mocknet_path / 'traffic-generator' + traffic_generator_home.mkdir() + os.mkdir(traffic_generator_home / 'neard-runner') + os.mkdir(traffic_generator_home / '.near') + os.mkdir(traffic_generator_home / '.near/setup') + node_homes = [] + for i in range(args.num_nodes): + node_home = local_mocknet_path / f'node{i}' + node_home.mkdir() + os.mkdir(node_home / f'neard-runner') + os.mkdir(node_home / f'.near') + os.mkdir(node_home / f'.near/setup') + node_homes.append(node_home) + return traffic_generator_home, node_homes + + +def copy_source_home(source_home_dir, traffic_generator_home): + shutil.copyfile(source_home_dir / 'config.json', + traffic_generator_home / 'config.json') + shutil.copyfile(source_home_dir / 'node_key.json', + traffic_generator_home / 'node_key.json') + shutil.copyfile(source_home_dir / 'genesis.json', + traffic_generator_home / 'genesis.json') + try: + shutil.copyfile(source_home_dir / 'records.json', + traffic_generator_home / 'records.json') + except FileNotFoundError: + pass + shutil.copytree(source_home_dir / 'data', traffic_generator_home / 'data') + + +def make_binaries_dir(local_mocknet_path, neard_binary_path): + binaries_path = local_mocknet_path / 'binaries' + binaries_path.mkdir() + binary_path = binaries_path / 'neard' + binary_path.symlink_to(neard_binary_path) + return binaries_path + + +class Server(http.server.HTTPServer): + + def __init__(self, addr, directory): + self.directory = directory + super().__init__(addr, http.server.SimpleHTTPRequestHandler) + + def finish_request(self, request, client_address): + 
self.RequestHandlerClass(request, + client_address, + self, + directory=self.directory) + + +def write_config(home, config): + with open(home / 'neard-runner' / 'config.json', 'w') as f: + json.dump(config, f) + + +# looks for directories called node{i} in `local_mocknet_path` +def get_node_homes(local_mocknet_path): + dirents = os.listdir(local_mocknet_path) + node_homes = [] + for p in dirents: + m = re.match(r'node(\d+)', p) + if m is None: + continue + node_homes.append((p, int(m.groups()[0]))) + node_homes.sort(key=lambda x: x[1]) + idx = -1 + for (home, node_index) in node_homes: + if node_index != idx + 1: + raise ValueError( + f'some neard runner node dirs missing? found: {[n[0] for n in node_homes]}' + ) + idx = node_index + return [local_mocknet_path / x[0] for x in node_homes] + + +# return a NodeHandle for each of the neard runner directories in `local_mocknet_path` +def get_nodes(local_mocknet_path=pathlib.Path.home() / '.near/local-mocknet'): + runner_port = 3000 + neard_rpc_port = 3040 + neard_protocol_port = 24577 + traffic_generator = NodeHandle( + LocalTestNeardRunner(local_mocknet_path / 'traffic-generator', + runner_port, neard_rpc_port, neard_protocol_port)) + + node_homes = get_node_homes(local_mocknet_path) + nodes = [] + for home in node_homes: + runner_port += 1 + neard_rpc_port += 1 + neard_protocol_port += 1 + nodes.append( + NodeHandle( + LocalTestNeardRunner(home, runner_port, neard_rpc_port, + neard_protocol_port))) + + return traffic_generator, nodes + + +def kill_neard_runner(home): + kill_process(home / 'pid.txt') + + +def kill_neard_runners(local_mocknet_path): + kill_neard_runner(local_mocknet_path / 'traffic-generator') + node_homes = get_node_homes(local_mocknet_path) + for home in node_homes: + kill_neard_runner(home) + + +def wait_node_serving(node): + while True: + try: + node.neard_runner_ready() + return + except requests.exceptions.ConnectionError: + pass + time.sleep(0.5) + + +def local_test_setup_cmd(args): + prompt_flags(args) + if args.source_home_dir is None: + sys.exit(f'must give --source-home-dir') + if args.legacy_records: + if args.target_home_dir is not None: + sys.exit(f'cannot give --target-home-dir with --legacy-records') + if args.fork_height is None: + sys.exit('must give --fork-height with --legacy-records') + else: + if args.target_home_dir is None: + sys.exit(f'must give --target-home-dir') + if args.fork_height is not None: + sys.exit('cannot give --fork-height without --legacy-records') + + local_mocknet_path = pathlib.Path.home() / '.near/local-mocknet' + if os.path.exists(local_mocknet_path): + if not args.yes: + print( + f'{local_mocknet_path} already exists. This command will delete and reinitialize it. Continue? 
[yes/no]:' + ) + if sys.stdin.readline().strip() != 'yes': + return + kill_neard_runners(local_mocknet_path) + shutil.rmtree(local_mocknet_path) + + neard_binary_path = pathlib.Path(args.neard_binary_path) + source_home_dir = pathlib.Path(args.source_home_dir) + + os.mkdir(local_mocknet_path) + traffic_generator_home, node_homes = mkdirs(local_mocknet_path) + copy_source_home(source_home_dir, traffic_generator_home / '.near') + if args.legacy_records: + make_legacy_records(neard_binary_path, traffic_generator_home, + node_homes, args.fork_height) + else: + target_home_dir = pathlib.Path(args.target_home_dir) + make_forked_network(neard_binary_path, traffic_generator_home, + node_homes, source_home_dir, target_home_dir) + # now set up an HTTP server to serve the binary that each neard_runner.py will request + binaries_path = make_binaries_dir(local_mocknet_path, neard_binary_path) + binaries_server_addr = 'localhost' + binaries_server_port = 8000 + binaries_server = Server(addr=(binaries_server_addr, binaries_server_port), + directory=binaries_path) + server_thread = threading.Thread( + target=lambda: binaries_server.serve_forever(), daemon=True) + server_thread.start() + + node_config = { + 'is_traffic_generator': + False, + 'binaries': [{ + 'url': + f'http://{binaries_server_addr}:{binaries_server_port}/neard', + 'epoch_height': + 0 + }] + } + traffic_generator_config = { + 'is_traffic_generator': + True, + 'binaries': [{ + 'url': + f'http://{binaries_server_addr}:{binaries_server_port}/neard', + 'epoch_height': + 0 + }] + } + + write_config(traffic_generator_home, traffic_generator_config) + for node_home in node_homes: + write_config(node_home, node_config) + + traffic_generator, nodes = get_nodes(local_mocknet_path) + traffic_generator.start_neard_runner() + for node in nodes: + node.start_neard_runner() + + for node in [traffic_generator] + nodes: + wait_node_serving(node) + + print( + f'All directories initialized. neard runners are running in dirs: {[str(traffic_generator.node.home)] + [str(n.node.home) for n in nodes]}, listening on respective ports: {[traffic_generator.node.port] + [n.node.port for n in nodes]}' + ) + + +if __name__ == '__main__': + parser = ArgumentParser(description='Set up a local instance of mocknet') + subparsers = parser.add_subparsers(title='subcommands', + description='valid subcommands') + + local_test_setup_parser = subparsers.add_parser('local-test-setup', + help=''' + Setup several instances of neard-runner to run locally. Then the mirror.py --local-test + argument can be used to test these test scripts themselves. + ''') + local_test_setup_parser.add_argument('--num-nodes', type=int) + # TODO: add a --neard-upgrade-binary-path flag too + local_test_setup_parser.add_argument('--neard-binary-path', type=str) + local_test_setup_parser.add_argument('--source-home-dir', + type=str, + help=''' + Near home directory containing some transactions that can be used to create a forked state + for transaction mirroring. This could be a home dir from a pytest in tests/sanity, for example. + ''') + local_test_setup_parser.add_argument('--fork-height', + type=int, + help=''' + Height where state should be forked from in the directory indicated by --source-home-dir. Ideally this should + be a height close to the node's tail. This is something that could be automated if there were an easy + way to get machine-readable valid heights in a near data directory, but for now this flag must be given manually. 
+ ''') + local_test_setup_parser.add_argument('--yes', action='store_true') + local_test_setup_parser.add_argument('--legacy-records', + action='store_true', + help=''' + If given, setup a records.json file with forked state instead of using the neard fork-network command + ''') + local_test_setup_parser.add_argument('--target-home-dir', + type=str, + help=''' + todo + ''') + local_test_setup_parser.set_defaults(func=local_test_setup_cmd) + + args = parser.parse_args() + args.func(args) diff --git a/pytest/tests/mocknet/mirror.py b/pytest/tests/mocknet/mirror.py index 9f3aaf66509..8ee544eca28 100755 --- a/pytest/tests/mocknet/mirror.py +++ b/pytest/tests/mocknet/mirror.py @@ -3,58 +3,20 @@ """ from argparse import ArgumentParser, BooleanOptionalAction -import cmd_utils +import datetime import pathlib import json import random -from rc import pmap, run -import requests +from rc import pmap +import re import sys import time sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) -import mocknet - from configured_logger import logger - - -def get_nodes(args): - pattern = args.chain_id + '-' + str( - args.start_height) + '-' + args.unique_id - all_nodes = mocknet.get_nodes(pattern=pattern) - if len(all_nodes) < 1: - sys.exit(f'no known nodes matching {pattern}') - - traffic_generator = None - nodes = [] - for n in all_nodes: - if n.instance_name.endswith('traffic'): - if traffic_generator is not None: - sys.exit( - f'more than one traffic generator instance found. {traffic_generator.instance_name} and {n.instance_name}' - ) - traffic_generator = n - else: - nodes.append(n) - - if traffic_generator is None: - sys.exit(f'no traffic generator instance found') - return traffic_generator, nodes - - -def wait_node_up(node): - while True: - try: - res = node.get_validators() - if 'error' not in res: - assert 'result' in res - logger.info(f'Node {node.instance_name} is up') - return - except (ConnectionRefusedError, - requests.exceptions.ConnectionError) as e: - pass - time.sleep(10) +import local_test_node +import remote_node def prompt_setup_flags(args): @@ -82,44 +44,6 @@ def prompt_setup_flags(args): args.genesis_protocol_version = int(sys.stdin.readline().strip()) -def start_neard_runner(node): - cmd_utils.run_in_background(node, f'/home/ubuntu/neard-runner/venv/bin/python /home/ubuntu/neard-runner/neard_runner.py ' \ - '--home /home/ubuntu/neard-runner --neard-home /home/ubuntu/.near ' \ - '--neard-logs /home/ubuntu/neard-logs --port 3000', 'neard-runner.txt') - - -def upload_neard_runner(node): - node.machine.upload('tests/mocknet/helpers/neard_runner.py', - '/home/ubuntu/neard-runner', - switch_user='ubuntu') - node.machine.upload('tests/mocknet/helpers/requirements.txt', - '/home/ubuntu/neard-runner', - switch_user='ubuntu') - - -def init_neard_runner(node, config, remove_home_dir=False): - stop_neard_runner(node) - cmd_utils.init_node(node) - if remove_home_dir: - cmd_utils.run_cmd( - node, - 'rm -rf /home/ubuntu/neard-runner && mkdir -p /home/ubuntu/neard-runner' - ) - else: - cmd_utils.run_cmd(node, 'mkdir -p /home/ubuntu/neard-runner') - upload_neard_runner(node) - mocknet.upload_json(node, '/home/ubuntu/neard-runner/config.json', config) - cmd = 'cd /home/ubuntu/neard-runner && python3 -m virtualenv venv -p $(which python3)' \ - ' && ./venv/bin/pip install -r requirements.txt' - cmd_utils.run_cmd(node, cmd) - start_neard_runner(node) - - -def stop_neard_runner(node): - # it's probably fine for now, but this is very heavy handed/not precise - node.machine.run('kill $(ps 
-C python -o pid=)') - - def prompt_init_flags(args): if args.neard_binary_url is None: print('neard binary URL?: ') @@ -178,9 +102,9 @@ def init_neard_runners(args, traffic_generator, nodes, remove_home_dir=False): }] } - init_neard_runner(traffic_generator, traffic_generator_config, - remove_home_dir) - pmap(lambda x: init_neard_runner(x[0], x[1], remove_home_dir), + traffic_generator.init_neard_runner(traffic_generator_config, + remove_home_dir) + pmap(lambda x: x[0].init_neard_runner(x[1], remove_home_dir), zip(nodes, configs)) @@ -197,18 +121,19 @@ def hard_reset_cmd(args, traffic_generator, nodes): Continue? [yes/no]""") if sys.stdin.readline().strip() != 'yes': return - all_nodes = nodes + [traffic_generator] - pmap(stop_neard_runner, all_nodes) - mocknet.stop_nodes(all_nodes) init_neard_runners(args, traffic_generator, nodes, remove_home_dir=True) def restart_cmd(args, traffic_generator, nodes): all_nodes = nodes + [traffic_generator] - pmap(stop_neard_runner, all_nodes) + pmap(lambda node: node.stop_neard_runner(), all_nodes) if args.upload_program: - pmap(upload_neard_runner, all_nodes) - pmap(start_neard_runner, all_nodes) + pmap(lambda node: node.upload_neard_runner(), all_nodes) + pmap(lambda node: node.start_neard_runner(), all_nodes) + + +def stop_runner_cmd(args, traffic_generator, nodes): + pmap(lambda node: node.stop_neard_runner(), nodes + [traffic_generator]) # returns boot nodes and validators we want for the new test network @@ -242,6 +167,22 @@ def get_network_nodes(new_test_rpc_responses, num_validators): return validators, boot_nodes +def new_genesis_timestamp(node): + version = node.neard_runner_version() + err = version.get('error') + if err is not None: + if err['code'] != -32601: + sys.exit( + f'bad response calling version RPC on {node.name()}: {err}') + return None + genesis_time = None + result = version.get('result') + if result is not None: + if result.get('node_setup_version') == '1': + genesis_time = str(datetime.datetime.now(tz=datetime.UTC)) + return genesis_time + + def new_test(args, traffic_generator, nodes): prompt_setup_flags(args) @@ -254,32 +195,38 @@ def new_test(args, traffic_generator, nodes): f'--num-validators is {args.num_validators} but only found {len(nodes)} under test' ) + genesis_time = new_genesis_timestamp(nodes[0]) + all_nodes = nodes + [traffic_generator] logger.info(f'resetting/initializing home dirs') - test_keys = pmap(neard_runner_new_test, all_nodes) + test_keys = pmap(lambda node: node.neard_runner_new_test(), all_nodes) validators, boot_nodes = get_network_nodes( - zip([n.machine.ip for n in all_nodes], test_keys), args.num_validators) + zip([n.ip_addr() for n in all_nodes], test_keys), args.num_validators) logger.info("""setting validators: {0} Then running neard amend-genesis on all nodes, and starting neard to compute genesis \ state roots. This will take a few hours. Run `status` to check if the nodes are \ ready. 
After they're ready, you can run `start-traffic`""".format(validators))
     pmap(
-        lambda node: neard_runner_network_init(
-            node, validators, boot_nodes, args.epoch_length, args.num_seats,
-            args.genesis_protocol_version), all_nodes)
+        lambda node: node.neard_runner_network_init(
+            validators,
+            boot_nodes,
+            args.epoch_length,
+            args.num_seats,
+            args.genesis_protocol_version,
+            genesis_time=genesis_time), all_nodes)
 
 
 def status_cmd(args, traffic_generator, nodes):
     all_nodes = nodes + [traffic_generator]
-    statuses = pmap(neard_runner_ready, all_nodes)
+    statuses = pmap(lambda node: node.neard_runner_ready(), all_nodes)
 
     num_ready = 0
     not_ready = []
     for ready, node in zip(statuses, all_nodes):
         if not ready:
-            not_ready.append(node.instance_name)
+            not_ready.append(node.name())
 
     if len(not_ready) == 0:
         print(f'all {len(all_nodes)} nodes ready')
@@ -296,141 +243,115 @@ def reset_cmd(args, traffic_generator, nodes):
     )
     if sys.stdin.readline().strip() != 'yes':
         sys.exit()
+    if args.backup_id is None:
+        backups = nodes[0].neard_runner_ls_backups()
+        backups_msg = 'ID | Time | Description\n'
+        if 'start' not in backups:
+            backups_msg += 'start | None | initial test state after state root computation\n'
+        for backup_id, backup_data in backups.items():
+            backups_msg += f'{backup_id} | {backup_data.get("time")} | {backup_data.get("description")}\n'
+
+        print(f'Backups as reported by {nodes[0].name()}:\n\n{backups_msg}')
+        print('please enter a backup ID here:')
+        args.backup_id = sys.stdin.readline().strip()
+        if args.backup_id != 'start' and args.backup_id not in backups:
+            print(
+                f'Given backup ID ({args.backup_id}) was not in the list given')
+            sys.exit()
+
     all_nodes = nodes + [traffic_generator]
-    pmap(neard_runner_reset, all_nodes)
+    pmap(lambda node: node.neard_runner_reset(backup_id=args.backup_id),
+         all_nodes)
     logger.info(
         'Data dir reset in progress. Run the `status` command to see when this is finished. Until it is finished, neard runners may not respond to HTTP requests.'
     )
 
 
-def stop_nodes_cmd(args, traffic_generator, nodes):
-    pmap(neard_runner_stop, nodes + [traffic_generator])
-
-
-def stop_traffic_cmd(args, traffic_generator, nodes):
-    neard_runner_stop(traffic_generator)
-
-
-def neard_runner_jsonrpc(node, method, params=[]):
-    body = {
-        'method': method,
-        'params': params,
-        'id': 'dontcare',
-        'jsonrpc': '2.0'
-    }
-    body = json.dumps(body)
-    # '"'"' will be interpreted as ending the first quote and then concatenating it with "'",
-    # followed by a new quote started with ' and the rest of the string, to get any single quotes
-    # in method or params into the command correctly
-    body = body.replace("'", "'\"'\"'")
-    r = cmd_utils.run_cmd(node, f'curl localhost:3000 -d \'{body}\'')
-    response = json.loads(r.stdout)
-    if 'error' in response:
-        # TODO: errors should be handled better here in general but just exit for now
-        sys.exit(
-            f'bad response trying to send {method} JSON RPC to neard runner on {node.instance_name}:\n{response}'
+def make_backup_cmd(args, traffic_generator, nodes):
+    if not args.yes:
+        print(
+            'this will stop all nodes and create a new backup of their home dirs. continue? 
[yes/no]' ) - return response['result'] - - -def neard_runner_start(node): - neard_runner_jsonrpc(node, 'start') - - -def neard_runner_stop(node): - neard_runner_jsonrpc(node, 'stop') + if sys.stdin.readline().strip() != 'yes': + sys.exit() + if args.backup_id is None: + print('please enter a backup ID:') + args.backup_id = sys.stdin.readline().strip() + if re.match(r'^[0-9a-zA-Z.][0-9a-zA-Z_\-.]+$', args.backup_id) is None: + sys.exit('invalid backup ID') + if args.description is None: + print('please enter a description (enter nothing to skip):') + description = sys.stdin.readline().strip() + if len(description) > 0: + args.description = description -def neard_runner_new_test(node): - return neard_runner_jsonrpc(node, 'new_test') + all_nodes = nodes + [traffic_generator] + pmap( + lambda node: node.neard_runner_make_backup( + backup_id=args.backup_id, description=args.description), all_nodes) -def neard_runner_network_init(node, validators, boot_nodes, epoch_length, - num_seats, protocol_version): - return neard_runner_jsonrpc(node, - 'network_init', - params={ - 'validators': validators, - 'boot_nodes': boot_nodes, - 'epoch_length': epoch_length, - 'num_seats': num_seats, - 'protocol_version': protocol_version, - }) +def stop_nodes_cmd(args, traffic_generator, nodes): + pmap(lambda node: node.neard_runner_stop(), nodes + [traffic_generator]) -def neard_update_config(node, key_value): - return neard_runner_jsonrpc( - node, - 'update_config', - params={ - "key_value": key_value, - }, - ) +def stop_traffic_cmd(args, traffic_generator, nodes): + traffic_generator.neard_runner_stop() def update_config_cmd(args, traffic_generator, nodes): nodes = nodes + [traffic_generator] results = pmap( - lambda node: neard_update_config( - node, - args.set, - ), + lambda node: node.neard_update_config(args.set), nodes, ) if not all(results): - logger.warn('failed to update configs for some nodes') + logger.warning('failed to update configs for some nodes') return -def neard_runner_ready(node): - return neard_runner_jsonrpc(node, 'ready') - - -def neard_runner_reset(node): - return neard_runner_jsonrpc(node, 'reset') - - def start_nodes_cmd(args, traffic_generator, nodes): - if not all(pmap(neard_runner_ready, nodes)): - logger.warn( + if not all(pmap(lambda node: node.neard_runner_ready(), nodes)): + logger.warning( 'not all nodes are ready to start yet. Run the `status` command to check their statuses' ) return - pmap(neard_runner_start, nodes) - pmap(wait_node_up, nodes) + pmap(lambda node: node.neard_runner_start(), nodes) + pmap(lambda node: node.wait_node_up(), nodes) def start_traffic_cmd(args, traffic_generator, nodes): - if not all(pmap(neard_runner_ready, nodes + [traffic_generator])): - logger.warn( + if not all( + pmap(lambda node: node.neard_runner_ready(), + nodes + [traffic_generator])): + logger.warning( 'not all nodes are ready to start yet. Run the `status` command to check their statuses' ) return - pmap(neard_runner_start, nodes) + pmap(lambda node: node.neard_runner_start(), nodes) logger.info("waiting for validators to be up") - pmap(wait_node_up, nodes) + pmap(lambda node: node.wait_node_up(), nodes) logger.info( "waiting a bit after validators started before starting traffic") time.sleep(10) - neard_runner_start(traffic_generator) + traffic_generator.neard_runner_start() logger.info( - f'test running. to check the traffic sent, try running "curl http://{traffic_generator.machine.ip}:3030/metrics | grep mirror"' + f'test running. 
to check the traffic sent, try running "curl --silent http://{traffic_generator.ip_addr()}:{traffic_generator.neard_port()}/metrics | grep near_mirror"'
     )
 
 
-def neard_runner_update_binaries(node):
-    neard_runner_jsonrpc(node, 'update_binaries')
-
-
 def update_binaries_cmd(args, traffic_generator, nodes):
-    pmap(neard_runner_update_binaries, nodes + [traffic_generator])
+    pmap(lambda node: node.neard_runner_update_binaries(),
+         nodes + [traffic_generator])
 
 
 if __name__ == '__main__':
-    parser = ArgumentParser(description='Run a load test')
-    parser.add_argument('--chain-id', type=str, required=True)
-    parser.add_argument('--start-height', type=int, required=True)
-    parser.add_argument('--unique-id', type=str, required=True)
+    parser = ArgumentParser(description='Control a mocknet instance')
+    parser.add_argument('--chain-id', type=str)
+    parser.add_argument('--start-height', type=int)
+    parser.add_argument('--unique-id', type=str)
+    parser.add_argument('--local-test', action='store_true')
 
     subparsers = parser.add_subparsers(title='subcommands',
                                        description='valid subcommands',
@@ -467,6 +388,10 @@ def update_binaries_cmd(args, traffic_generator, nodes):
     restart_parser.add_argument('--upload-program', action='store_true')
     restart_parser.set_defaults(func=restart_cmd, upload_program=False)
 
+    stop_runner_parser = subparsers.add_parser(
+        'stop-neard-runner', help='''Stops the neard runner on all nodes.''')
+    stop_runner_parser.set_defaults(func=stop_runner_cmd)
+
     hard_reset_parser = subparsers.add_parser(
         'hard-reset',
         help='''Stops neard and clears all test state on all nodes.''')
@@ -514,6 +439,15 @@ def update_binaries_cmd(args, traffic_generator, nodes):
         help='stop the traffic generator, but leave the other nodes running')
     stop_parser.set_defaults(func=stop_traffic_cmd)
 
+    backup_parser = subparsers.add_parser('make-backup',
+                                          help='''
+    Stops all nodes and has them make a backup of the data dir that can later be restored to with the reset command
+    ''')
+    backup_parser.add_argument('--yes', action='store_true')
+    backup_parser.add_argument('--backup-id', type=str)
+    backup_parser.add_argument('--description', type=str)
+    backup_parser.set_defaults(func=make_backup_cmd)
+
     reset_parser = subparsers.add_parser('reset',
                                          help='''
     The new_test command saves the data directory after the genesis state roots are computed so that
@@ -521,6 +455,7 @@ def update_binaries_cmd(args, traffic_generator, nodes):
     data dirs to what was saved then, so that start-traffic will start the test all over again.
''') reset_parser.add_argument('--yes', action='store_true') + reset_parser.add_argument('--backup-id', type=str) reset_parser.set_defaults(func=reset_cmd) # It re-uses the same binary urls because it's quite easy to do it with the @@ -536,5 +471,16 @@ def update_binaries_cmd(args, traffic_generator, nodes): args = parser.parse_args() - traffic_generator, nodes = get_nodes(args) + if args.local_test: + if args.chain_id is not None or args.start_height is not None or args.unique_id is not None: + sys.exit( + f'cannot give --chain-id --start-height or --unique-id along with --local-test' + ) + traffic_generator, nodes = local_test_node.get_nodes() + else: + if args.chain_id is None or args.start_height is None or args.unique_id is None: + sys.exit( + f'must give all of --chain-id --start-height and --unique-id') + traffic_generator, nodes = remote_node.get_nodes( + args.chain_id, args.start_height, args.unique_id) args.func(args, traffic_generator, nodes) diff --git a/pytest/tests/mocknet/node_handle.py b/pytest/tests/mocknet/node_handle.py new file mode 100644 index 00000000000..92e10b699bb --- /dev/null +++ b/pytest/tests/mocknet/node_handle.py @@ -0,0 +1,141 @@ +import pathlib +import requests +import sys +import time + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) + +from configured_logger import logger + + +class NodeHandle: + + def __init__(self, node): + self.node = node + + def name(self): + return self.node.name() + + def ip_addr(self): + return self.node.ip_addr() + + def neard_port(self): + return self.node.neard_port() + + def stop_neard_runner(self): + self.node.stop_neard_runner() + + def start_neard_runner(self): + self.node.start_neard_runner() + + def upload_neard_runner(self): + self.node.upload_neard_runner() + + def init_neard_runner(self, config, remove_home_dir=False): + self.node.stop_neard_runner() + self.node.init() + self.node.mk_neard_runner_home(remove_home_dir) + self.node.upload_neard_runner() + # TODO: this config file should just be replaced by parameters to the new-test + # rpc method. This was originally made a config file instead because the rpc port + # was open to the internet, but now that we call it via ssh instead (which we should + # have done from the beginning), it's not really necessary and just an arbitrary difference + self.node.upload_neard_runner_config(config) + self.node.init_python() + self.node.start_neard_runner() + + # TODO: is the validators RPC the best way to do this? What are we trying to + # test for exactly? 
The use of this is basically just cargo culted from a while ago, + # but maybe we should consider something else + def wait_node_up(self): + while True: + try: + res = self.node.get_validators() + if 'error' not in res: + assert 'result' in res + logger.info(f'Node {self.node.name()} is up') + return + except (ConnectionRefusedError, + requests.exceptions.ConnectionError) as e: + pass + time.sleep(10) + + # Same as neard_runner_jsonrpc() without checking the error + # This should maybe be the behavior everywhere, and callers + # should handle errors themselves + def neard_runner_jsonrpc_nocheck(self, method, params=[]): + body = { + 'method': method, + 'params': params, + 'id': 'dontcare', + 'jsonrpc': '2.0' + } + return self.node.neard_runner_post(body) + + def neard_runner_jsonrpc(self, method, params=[]): + response = self.neard_runner_jsonrpc_nocheck(method, params) + if 'error' in response: + # TODO: errors should be handled better here in general but just exit for now + sys.exit( + f'bad response trying to send {method} JSON RPC to neard runner on {self.node.name()}:\n{response}' + ) + return response['result'] + + def neard_runner_start(self): + return self.neard_runner_jsonrpc('start') + + def neard_runner_stop(self): + return self.neard_runner_jsonrpc('stop') + + def neard_runner_new_test(self): + params = self.node.new_test_params() + return self.neard_runner_jsonrpc('new_test', params) + + def neard_runner_network_init(self, + validators, + boot_nodes, + epoch_length, + num_seats, + protocol_version, + genesis_time=None): + params = { + 'validators': validators, + 'boot_nodes': boot_nodes, + 'epoch_length': epoch_length, + 'num_seats': num_seats, + 'protocol_version': protocol_version, + } + if genesis_time is not None: + params['genesis_time'] = genesis_time + return self.neard_runner_jsonrpc('network_init', params=params) + + def neard_runner_ready(self): + return self.neard_runner_jsonrpc('ready') + + def neard_runner_version(self): + return self.neard_runner_jsonrpc_nocheck('version') + + def neard_runner_make_backup(self, backup_id, description=None): + return self.neard_runner_jsonrpc('make_backup', + params={ + 'backup_id': backup_id, + 'description': description + }) + + def neard_runner_ls_backups(self): + return self.neard_runner_jsonrpc('ls_backups') + + def neard_runner_reset(self, backup_id=None): + return self.neard_runner_jsonrpc('reset', + params={'backup_id': backup_id}) + + def neard_runner_update_binaries(self): + return self.neard_runner_jsonrpc('update_binaries') + + def neard_update_config(self, key_value): + return self.neard_runner_jsonrpc( + 'update_config', + params={ + "key_value": key_value, + }, + ) diff --git a/pytest/tests/mocknet/remote_node.py b/pytest/tests/mocknet/remote_node.py new file mode 100644 index 00000000000..b32106708d5 --- /dev/null +++ b/pytest/tests/mocknet/remote_node.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +""" +defines the RemoteNeardRunner class meant to be interacted with over ssh +""" +import pathlib +import json +import sys + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) + +import cmd_utils +from node_handle import NodeHandle +import mocknet + + +class RemoteNeardRunner: + + def __init__(self, node): + self.node = node + + def name(self): + return self.node.instance_name + + def ip_addr(self): + return self.node.machine.ip + + def neard_port(self): + return 3030 + + def init(self): + cmd_utils.init_node(self.node) + + def mk_neard_runner_home(self, remove_home_dir): + if remove_home_dir: + 
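+        # (Editor's aside, illustrative only: the runner speaks plain JSON-RPC 2.0
+        # over HTTP, so a node's runner can also be poked directly, e.g.:
+        #   requests.post('http://localhost:3000',
+        #                 json={'method': 'ready', 'params': [],
+        #                       'id': 'dontcare', 'jsonrpc': '2.0'}).json()
+        # localhost:3000 assumes the local-test setup; remote nodes are reached
+        # via ssh and curl instead, as in neard_runner_post() below.)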
cmd_utils.run_cmd( + self.node, + 'rm -rf /home/ubuntu/.near/neard-runner && mkdir -p /home/ubuntu/.near/neard-runner' + ) + else: + cmd_utils.run_cmd(self.node, + 'mkdir -p /home/ubuntu/.near/neard-runner') + + def upload_neard_runner(self): + self.node.machine.upload('tests/mocknet/helpers/neard_runner.py', + '/home/ubuntu/.near/neard-runner', + switch_user='ubuntu') + self.node.machine.upload('tests/mocknet/helpers/requirements.txt', + '/home/ubuntu/.near/neard-runner', + switch_user='ubuntu') + + def upload_neard_runner_config(self, config): + mocknet.upload_json(self.node, + '/home/ubuntu/.near/neard-runner/config.json', + config) + + def init_python(self): + cmd = 'cd /home/ubuntu/.near/neard-runner && python3 -m virtualenv venv -p $(which python3)' \ + ' && ./venv/bin/pip install -r requirements.txt' + cmd_utils.run_cmd(self.node, cmd) + + def stop_neard_runner(self): + # this looks for python processes with neard_runner.py in the command line. the first word will + # be the pid, which we extract with the last awk command + self.node.machine.run( + 'kill $(ps -C python -o pid=,cmd= | grep neard_runner.py | awk \'{print $1};\')' + ) + + def start_neard_runner(self): + cmd_utils.run_in_background(self.node, f'/home/ubuntu/.near/neard-runner/venv/bin/python /home/ubuntu/.near/neard-runner/neard_runner.py ' \ + '--home /home/ubuntu/.near/neard-runner --neard-home /home/ubuntu/.near ' \ + '--neard-logs /home/ubuntu/neard-logs --port 3000', 'neard-runner.txt') + + def neard_runner_post(self, body): + body = json.dumps(body) + # '"'"' will be interpreted as ending the first quote and then concatenating it with "'", + # followed by a new quote started with ' and the rest of the string, to get any single quotes + # in method or params into the command correctly + body = body.replace("'", "'\"'\"'") + r = cmd_utils.run_cmd(self.node, f'curl localhost:3000 -d \'{body}\'') + return json.loads(r.stdout) + + def new_test_params(self): + return [] + + def get_validators(self): + return self.node.get_validators() + + +def get_nodes(chain_id, start_height, unique_id): + pattern = chain_id + '-' + str(start_height) + '-' + unique_id + all_nodes = mocknet.get_nodes(pattern=pattern) + if len(all_nodes) < 1: + sys.exit(f'no known nodes matching {pattern}') + + traffic_generator = None + nodes = [] + for n in all_nodes: + if n.instance_name.endswith('traffic'): + if traffic_generator is not None: + sys.exit( + f'more than one traffic generator instance found. 
{traffic_generator.instance_name} and {n.instance_name}' + ) + traffic_generator = n + else: + nodes.append(n) + + if traffic_generator is None: + sys.exit(f'no traffic generator instance found') + return NodeHandle(RemoteNeardRunner(traffic_generator)), [ + NodeHandle(RemoteNeardRunner(node)) for node in nodes + ] diff --git a/pytest/tests/sanity/rpc_max_gas_burnt.py b/pytest/tests/sanity/rpc_max_gas_burnt.py index 665e0b71ac8..5788eaa4a30 100755 --- a/pytest/tests/sanity/rpc_max_gas_burnt.py +++ b/pytest/tests/sanity/rpc_max_gas_burnt.py @@ -17,6 +17,8 @@ import utils import transaction +GGAS = 10**9 + def test_max_gas_burnt_view(): nodes = cluster.start_cluster( @@ -26,7 +28,7 @@ def test_max_gas_burnt_view(): config=None, genesis_config_changes=[], client_config_changes={1: { - 'max_gas_burnt_view': int(5e10) + 'max_gas_burnt_view': 130 * GGAS, }}) contract_key = nodes[0].signer_key diff --git a/pytest/tools/mirror/mirror_utils.py b/pytest/tools/mirror/mirror_utils.py index 0c43d427ea1..b363dcf0366 100644 --- a/pytest/tools/mirror/mirror_utils.py +++ b/pytest/tools/mirror/mirror_utils.py @@ -369,7 +369,7 @@ def call_create_account(node, signer_key, account_id, public_key, nonce, args = bytearray(args, encoding='utf-8') actions = [ - transaction.create_function_call_action('create_account', args, 10**13, + transaction.create_function_call_action('create_account', args, 10**14, 10**24), transaction.create_payment_action(123) ] diff --git a/runtime/near-test-contracts/test-contract-rs/src/lib.rs b/runtime/near-test-contracts/test-contract-rs/src/lib.rs index d42b4ed1713..b4e2d4e6aa4 100644 --- a/runtime/near-test-contracts/test-contract-rs/src/lib.rs +++ b/runtime/near-test-contracts/test-contract-rs/src/lib.rs @@ -843,16 +843,16 @@ fn call_promise() { /// the contents of which should match the function's input. /// /// Used as the yield callback in tests of yield create / yield resume. +/// Returns double the first byte of the payload, if there is one. #[no_mangle] -unsafe fn check_promise_result() { +unsafe fn check_promise_result_return_value() { input(0); let expected_result_len = register_len(0) as usize; let expected = vec![0u8; expected_result_len]; read_register(0, expected.as_ptr() as u64); assert_eq!(promise_results_count(), 1); - - let status = match promise_result(0, 0) { + match promise_result(0, 0) { 1 => { let mut result = vec![0; register_len(0) as usize]; read_register(0, result.as_ptr() as *const u64 as u64); @@ -862,12 +862,38 @@ unsafe fn check_promise_result() { // Used in tests to verify that this function's return value is handled as expected. result[0] *= 2; value_return(1u64, result.as_ptr() as u64); + } + 2 => { + assert_eq!(expected_result_len, 0); + let result = vec![23u8]; + value_return(1u64, result.as_ptr() as u64); + } + _ => unreachable!(), + }; +} +/// Function which expects to receive exactly one promise result, +/// the contents of which should match the function's input. +/// +/// Used as the yield callback in tests of yield create / yield resume. +/// Writes the status of the promise result to storage. 
+#[no_mangle]
+unsafe fn check_promise_result_write_status() {
+    input(0);
+    let expected_result_len = register_len(0) as usize;
+    let expected = vec![0u8; expected_result_len];
+    read_register(0, expected.as_ptr() as u64);
+
+    assert_eq!(promise_results_count(), 1);
+    let status = match promise_result(0, 0) {
+        1 => {
+            let result = vec![0; register_len(0) as usize];
+            read_register(0, result.as_ptr() as *const u64 as u64);
+            assert_eq!(expected, result);
             "Resumed "
         }
         2 => {
             assert_eq!(expected_result_len, 0);
-
             "Timeout "
         }
         _ => unreachable!(),
@@ -887,16 +913,46 @@ unsafe fn check_promise_result() {
 
 /// Call promise_yield_create, specifying `check_promise_result` as the yield callback.
 /// Given input is passed as the argument to the `check_promise_result` function call.
+/// Sets the yield callback's output as the return value.
+#[cfg(feature = "nightly")]
+#[no_mangle]
+pub unsafe fn call_yield_create_return_promise() {
+    input(0);
+    let payload = vec![0u8; register_len(0) as usize];
+    read_register(0, payload.as_ptr() as u64);
+
+    // Create a promise yield with callback `check_promise_result_return_value`,
+    // passing the expected payload as an argument to the function.
+    let method_name = "check_promise_result_return_value";
+    let gas_fixed = 0;
+    let gas_weight = 1;
+    let data_id_register = 0;
+    let promise_index = promise_yield_create(
+        method_name.len() as u64,
+        method_name.as_ptr() as u64,
+        payload.len() as u64,
+        payload.as_ptr() as u64,
+        gas_fixed,
+        gas_weight,
+        data_id_register,
+    );
+
+    promise_return(promise_index);
+}
+
+/// Call promise_yield_create, specifying `check_promise_result_write_status` as the yield callback.
+/// Given input is passed as the argument to the `check_promise_result_write_status` function call.
+/// Returns the data id produced by promise_yield_create.
 #[cfg(feature = "nightly")]
 #[no_mangle]
-pub unsafe fn call_yield_create() {
+pub unsafe fn call_yield_create_return_data_id() {
     input(0);
     let payload = vec![0u8; register_len(0) as usize];
     read_register(0, payload.as_ptr() as u64);
 
     // Create a promise yield with callback `check_promise_result`,
     // passing the expected payload as an argument to the function.
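+// (Editor's note: the old `check_promise_result` callback is split into two
+// variants in this change: `check_promise_result_return_value`, whose return
+// value doubles the first payload byte so callers can check it, and
+// `check_promise_result_write_status`, which records "Resumed "/"Timeout "
+// in storage instead.)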
-    let method_name = "check_promise_result";
+    let method_name = "check_promise_result_return_value";
     let gas_fixed = 0;
     let gas_weight = 1;
     let data_id_register = 0;
diff --git a/runtime/near-vm-runner/src/near_vm_runner/memory.rs b/runtime/near-vm-runner/src/near_vm_runner/memory.rs
new file mode 100644
index 00000000000..ad7ad529f78
--- /dev/null
+++ b/runtime/near-vm-runner/src/near_vm_runner/memory.rs
@@ -0,0 +1,99 @@
+use crate::logic::{MemSlice, MemoryLike};
+use near_vm_types::{MemoryType, Pages};
+use near_vm_vm::{LinearMemory, Memory, MemoryStyle, VMMemory};
+use std::borrow::Cow;
+use std::sync::Arc;
+
+#[derive(Clone)]
+pub struct NearVmMemory(Arc<LinearMemory>);
+
+impl NearVmMemory {
+    pub fn new(
+        initial_memory_pages: u32,
+        max_memory_pages: u32,
+    ) -> Result<Self, near_vm_vm::MemoryError> {
+        let max_pages = Pages(max_memory_pages);
+        Ok(NearVmMemory(Arc::new(LinearMemory::new(
+            &MemoryType::new(Pages(initial_memory_pages), Some(max_pages), false),
+            &MemoryStyle::Static {
+                bound: max_pages,
+                offset_guard_size: near_vm_types::WASM_PAGE_SIZE as u64,
+            },
+        )?)))
+    }
+
+    /// Returns pointer to memory at the specified offset provided that there’s
+    /// enough space in the buffer starting at the returned pointer.
+    ///
+    /// Safety: Caller must guarantee that the returned pointer is not used
+    /// after guest memory mapping is changed (e.g. grown).
+    unsafe fn get_ptr(&self, offset: u64, len: usize) -> Result<*mut u8, ()> {
+        let offset = usize::try_from(offset).map_err(|_| ())?;
+        // SAFETY: Caller promises memory mapping won’t change.
+        let vmmem = unsafe { self.0.vmmemory().as_ref() };
+        // `checked_sub` here verifies that offsetting the buffer by offset
+        // still lands us in-bounds of the allocated object.
+        let remaining = vmmem.current_length.checked_sub(offset).ok_or(())?;
+        if len <= remaining {
+            Ok(vmmem.base.add(offset))
+        } else {
+            Err(())
+        }
+    }
+
+    /// Returns shared reference to slice in guest memory at given offset.
+    ///
+    /// Safety: Caller must guarantee that guest memory mapping is not changed
+    /// (e.g. grown) while the slice is held.
+    unsafe fn get(&self, offset: u64, len: usize) -> Result<&[u8], ()> {
+        // SAFETY: Caller promises memory mapping won’t change.
+        let ptr = unsafe { self.get_ptr(offset, len)? };
+        // SAFETY: get_ptr verifies that [ptr, ptr+len) is a valid slice.
+        Ok(unsafe { core::slice::from_raw_parts(ptr, len) })
+    }
+
+    /// Returns exclusive reference to slice in guest memory at given offset.
+    ///
+    /// Safety: Caller must guarantee that guest memory mapping is not changed
+    /// (e.g. grown) while the slice is held.
+    unsafe fn get_mut(&mut self, offset: u64, len: usize) -> Result<&mut [u8], ()> {
+        // SAFETY: Caller promises memory mapping won’t change.
+        let ptr = unsafe { self.get_ptr(offset, len)? };
+        // SAFETY: get_ptr verifies that [ptr, ptr+len) is a valid slice and since
+        // we’re holding an exclusive self reference another mut reference won’t be
+        // created
+        Ok(unsafe { core::slice::from_raw_parts_mut(ptr, len) })
+    }
+
+    pub(super) fn vm(&self) -> VMMemory {
+        VMMemory { from: self.0.clone(), instance_ref: None }
+    }
+}
+
+impl MemoryLike for NearVmMemory {
+    fn fits_memory(&self, slice: MemSlice) -> Result<(), ()> {
+        // SAFETY: Contracts are executed on a single thread thus we know no one
+        // will change guest memory mapping under us.
+        unsafe { self.get_ptr(slice.ptr, slice.len()?) }.map(|_| ())
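+        // (Editor's aside: the bounds check in get_ptr() above amounts to
+        // requiring current_length - offset >= len; e.g. offset 65_500 with
+        // len 36 fits a 65_536-byte memory, while len 37 would not.)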
    }
+
+    fn view_memory(&self, slice: MemSlice) -> Result<Cow<[u8]>, ()> {
+        // SAFETY: Firstly, contracts are executed on a single thread thus we
+        // know no one will change guest memory mapping under us. Secondly, the
+        // way MemoryLike interface is used we know the memory mapping won’t be
+        // changed by the caller while it holds the slice reference.
+        unsafe { self.get(slice.ptr, slice.len()?) }.map(Cow::Borrowed)
+    }
+
+    fn read_memory(&self, offset: u64, buffer: &mut [u8]) -> Result<(), ()> {
+        // SAFETY: Contracts are executed on a single thread thus we know no one
+        // will change guest memory mapping under us.
+        Ok(buffer.copy_from_slice(unsafe { self.get(offset, buffer.len())? }))
+    }
+
+    fn write_memory(&mut self, offset: u64, buffer: &[u8]) -> Result<(), ()> {
+        // SAFETY: Contracts are executed on a single thread thus we know no one
+        // will change guest memory mapping under us.
+        Ok(unsafe { self.get_mut(offset, buffer.len())? }.copy_from_slice(buffer))
+    }
+}
diff --git a/runtime/near-vm-runner/src/near_vm_runner/mod.rs b/runtime/near-vm-runner/src/near_vm_runner/mod.rs
new file mode 100644
index 00000000000..031b8bf2a5f
--- /dev/null
+++ b/runtime/near-vm-runner/src/near_vm_runner/mod.rs
@@ -0,0 +1,48 @@
+mod memory;
+mod runner;
+
+pub use memory::NearVmMemory;
+pub(crate) use runner::NearVM;
+
+#[derive(Hash)]
+struct NearVmConfig {
+    seed: u32,
+    engine: NearVmEngine,
+    compiler: NearVmCompiler,
+}
+
+impl NearVmConfig {
+    fn config_hash(self: Self) -> u64 {
+        crate::utils::stable_hash(&self)
+    }
+}
+
+#[derive(Hash, PartialEq, Debug)]
+#[allow(unused)]
+enum NearVmEngine {
+    Universal = 1,
+    StaticLib = 2,
+    DynamicLib = 3,
+}
+
+#[derive(Hash, PartialEq, Debug)]
+#[allow(unused)]
+enum NearVmCompiler {
+    Singlepass = 1,
+    Cranelift = 2,
+    Llvm = 3,
+}
+
+// We use the following scheme for the bits forming the seed:
+//   kind << 29, kind 2 is for NearVm
+//   major version << 6
+//   minor version
+const VM_CONFIG: NearVmConfig = NearVmConfig {
+    seed: (2 << 29) | (2 << 6) | 1,
+    engine: NearVmEngine::Universal,
+    compiler: NearVmCompiler::Singlepass,
+};
+
+pub(crate) fn near_vm_vm_hash() -> u64 {
+    VM_CONFIG.config_hash()
+}
diff --git a/runtime/near-vm-runner/src/near_vm_runner.rs b/runtime/near-vm-runner/src/near_vm_runner/runner.rs
similarity index 83%
rename from runtime/near-vm-runner/src/near_vm_runner.rs
rename to runtime/near-vm-runner/src/near_vm_runner/runner.rs
index 0940bd62946..9becfc23acd 100644
--- a/runtime/near-vm-runner/src/near_vm_runner.rs
+++ b/runtime/near-vm-runner/src/near_vm_runner/runner.rs
@@ -1,3 +1,4 @@
+use super::{NearVmMemory, VM_CONFIG};
 use crate::cache::CompiledContractInfo;
 use crate::errors::ContractPrecompilatonResult;
 use crate::imports::near_vm::NearVmImports;
@@ -6,7 +7,8 @@ use crate::logic::errors::{
 };
 use crate::logic::gas_counter::FastGasCounter;
 use crate::logic::types::PromiseResult;
-use crate::logic::{Config, External, MemSlice, MemoryLike, VMContext, VMLogic, VMOutcome};
+use crate::logic::{Config, External, VMContext, VMLogic, VMOutcome};
+use crate::near_vm_runner::{NearVmCompiler, NearVmEngine};
 use crate::runner::VMResult;
 use crate::{
     get_contract_cache_key, imports, CompiledContract, ContractCode, ContractRuntimeCache,
@@ -22,110 +24,12 @@ use near_vm_engine::universal::{
     UniversalExecutableRef,
 };
 use near_vm_types::{FunctionIndex, InstanceConfig, MemoryType, Pages, WASM_PAGE_SIZE};
-use near_vm_vm::{
-    Artifact, Instantiatable, LinearMemory, LinearTable, Memory, MemoryStyle, TrapCode, VMMemory,
-};
-use std::borrow::Cow;
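+// (Editor's aside: the seed scheme documented in mod.rs above packs
+// (kind << 29) | (major << 6) | minor, so VM_CONFIG's seed of
+// (2 << 29) | (2 << 6) | 1 encodes kind 2 (NearVm), major version 2,
+// minor version 1, i.e. the integer 1073741953.)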
-use std::hash::Hash;
+use near_vm_vm::{Artifact, Instantiatable, LinearTable, Memory, MemoryStyle, TrapCode, VMMemory};
 use std::mem::size_of;
 use std::sync::{Arc, OnceLock};
 
 type VMArtifact = Arc<UniversalArtifact>;
 
-#[derive(Clone)]
-pub struct NearVmMemory(Arc<LinearMemory>);
-
-impl NearVmMemory {
-    fn new(
-        initial_memory_pages: u32,
-        max_memory_pages: u32,
-    ) -> Result<Self, near_vm_vm::MemoryError> {
-        let max_pages = Pages(max_memory_pages);
-        Ok(NearVmMemory(Arc::new(LinearMemory::new(
-            &MemoryType::new(Pages(initial_memory_pages), Some(max_pages), false),
-            &MemoryStyle::Static {
-                bound: max_pages,
-                offset_guard_size: near_vm_types::WASM_PAGE_SIZE as u64,
-            },
-        )?)))
-    }
-
-    /// Returns pointer to memory at the specified offset provided that there’s
-    /// enough space in the buffer starting at the returned pointer.
-    ///
-    /// Safety: Caller must guarantee that the returned pointer is not used
-    /// after guest memory mapping is changed (e.g. grown).
-    unsafe fn get_ptr(&self, offset: u64, len: usize) -> Result<*mut u8, ()> {
-        let offset = usize::try_from(offset).map_err(|_| ())?;
-        // SAFETY: Caller promisses memory mapping won’t change.
-        let vmmem = unsafe { self.0.vmmemory().as_ref() };
-        // `checked_sub` here verifies that offsetting the buffer by offset
-        // still lands us in-bounds of the allocated object.
-        let remaining = vmmem.current_length.checked_sub(offset).ok_or(())?;
-        if len <= remaining {
-            Ok(vmmem.base.add(offset))
-        } else {
-            Err(())
-        }
-    }
-
-    /// Returns shared reference to slice in guest memory at given offset.
-    ///
-    /// Safety: Caller must guarantee that guest memory mapping is not changed
-    /// (e.g. grown) while the slice is held.
-    unsafe fn get(&self, offset: u64, len: usize) -> Result<&[u8], ()> {
-        // SAFETY: Caller promisses memory mapping won’t change.
-        let ptr = unsafe { self.get_ptr(offset, len)? };
-        // SAFETY: get_ptr verifies that [ptr, ptr+len) is valid slice.
-        Ok(unsafe { core::slice::from_raw_parts(ptr, len) })
-    }
-
-    /// Returns shared reference to slice in guest memory at given offset.
-    ///
-    /// Safety: Caller must guarantee that guest memory mapping is not changed
-    /// (e.g. grown) while the slice is held.
-    unsafe fn get_mut(&mut self, offset: u64, len: usize) -> Result<&mut [u8], ()> {
-        // SAFETY: Caller promisses memory mapping won’t change.
-        let ptr = unsafe { self.get_ptr(offset, len)? };
-        // SAFETY: get_ptr verifies that [ptr, ptr+len) is valid slice and since
-        // we’re holding exclusive self reference another mut reference won’t be
-        // created
-        Ok(unsafe { core::slice::from_raw_parts_mut(ptr, len) })
-    }
-
-    pub(crate) fn vm(&self) -> VMMemory {
-        VMMemory { from: self.0.clone(), instance_ref: None }
-    }
-}
-
-impl MemoryLike for NearVmMemory {
-    fn fits_memory(&self, slice: MemSlice) -> Result<(), ()> {
-        // SAFETY: Contracts are executed on a single thread thus we know no one
-        // will change guest memory mapping under us.
-        unsafe { self.get_ptr(slice.ptr, slice.len()?) }.map(|_| ())
-    }
-
-    fn view_memory(&self, slice: MemSlice) -> Result<Cow<[u8]>, ()> {
-        // SAFETY: Firstly, contracts are executed on a single thread thus we
-        // know no one will change guest memory mapping under us. Secondly, the
-        // way MemoryLike interface is used we know the memory mapping won’t be
-        // changed by the caller while it holds the slice reference.
-        unsafe { self.get(slice.ptr, slice.len()?) 
}.map(Cow::Borrowed) - } - - fn read_memory(&self, offset: u64, buffer: &mut [u8]) -> Result<(), ()> { - // SAFETY: Contracts are executed on a single thread thus we know no one - // will change guest memory mapping under us. - Ok(buffer.copy_from_slice(unsafe { self.get(offset, buffer.len())? })) - } - - fn write_memory(&mut self, offset: u64, buffer: &[u8]) -> Result<(), ()> { - // SAFETY: Contracts are executed on a single thread thus we know no one - // will change guest memory mapping under us. - Ok(unsafe { self.get_mut(offset, buffer.len())? }.copy_from_slice(buffer)) - } -} - fn get_entrypoint_index( artifact: &UniversalArtifact, method_name: &str, @@ -187,49 +91,6 @@ fn translate_runtime_error( }) } -#[derive(Hash, PartialEq, Debug)] -#[allow(unused)] -enum NearVmEngine { - Universal = 1, - StaticLib = 2, - DynamicLib = 3, -} - -#[derive(Hash, PartialEq, Debug)] -#[allow(unused)] -enum NearVmCompiler { - Singlepass = 1, - Cranelift = 2, - Llvm = 3, -} - -#[derive(Hash)] -struct NearVmConfig { - seed: u32, - engine: NearVmEngine, - compiler: NearVmCompiler, -} - -impl NearVmConfig { - fn config_hash(self: Self) -> u64 { - crate::utils::stable_hash(&self) - } -} - -// We use following scheme for the bits forming seed: -// kind << 29, kind 2 is for NearVm -// major version << 6 -// minor version -const VM_CONFIG: NearVmConfig = NearVmConfig { - seed: (2 << 29) | (2 << 6) | 1, - engine: NearVmEngine::Universal, - compiler: NearVmCompiler::Singlepass, -}; - -pub(crate) fn near_vm_vm_hash() -> u64 { - VM_CONFIG.config_hash() -} - pub(crate) struct NearVM { pub(crate) config: Config, pub(crate) engine: UniversalEngine, diff --git a/runtime/near-vm-runner/src/tests/compile_errors.rs b/runtime/near-vm-runner/src/tests/compile_errors.rs index d70fa030eb7..669ac73defa 100644 --- a/runtime/near-vm-runner/src/tests/compile_errors.rs +++ b/runtime/near-vm-runner/src/tests/compile_errors.rs @@ -22,7 +22,7 @@ fn test_initializer_wrong_signature_contract() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 48017463 used gas 48017463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 98625073 used gas 98625073 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -43,7 +43,7 @@ fn test_function_not_defined_contract() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 39564213 used gas 39564213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 56142568 used gas 56142568 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -73,7 +73,7 @@ fn test_function_type_not_defined_contract_1() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 44982963 used gas 44982963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 83374943 used gas 83374943 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -93,7 +93,7 @@ fn test_function_type_not_defined_contract_2() { Err: PrepareError: Error happened while deserializing the module. 
"#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 44982963 used gas 44982963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 83374943 used gas 83374943 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -131,7 +131,7 @@ fn test_evil_function_index() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 44115963 used gas 44115963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 79017763 used gas 79017763 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -161,10 +161,10 @@ fn test_limit_contract_functions_number() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13048032213 used gas 13048032213 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13054614261 used gas 13054614261 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 65437853336 used gas 65437853336 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13054614261 used gas 13054614261 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 65437853336 used gas 65437853336 "#]], ]); @@ -193,7 +193,7 @@ fn test_limit_contract_functions_number() { Err: PrepareError: Too many functions in contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13049332713 used gas 13049332713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 65437807058 used gas 65437807058 Err: PrepareError: Too many functions in contract. "#]], ]); @@ -223,7 +223,7 @@ fn test_limit_contract_functions_number() { Err: PrepareError: Too many functions in contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 19554433713 used gas 19554433713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 98129728598 used gas 98129728598 Err: PrepareError: Too many functions in contract. "#]], ]); @@ -252,7 +252,7 @@ fn test_limit_contract_functions_number() { Err: PrepareError: Too many functions in contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13051283463 used gas 13051283463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 65447610713 used gas 65447610713 Err: PrepareError: Too many functions in contract. "#]], ]); @@ -286,7 +286,7 @@ fn test_limit_locals() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 43682463 used gas 43682463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 76839173 used gas 76839173 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -311,7 +311,7 @@ fn test_limit_locals() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 43682463 used gas 43682463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 76839173 used gas 76839173 Err: ... "#]], ]); @@ -344,7 +344,7 @@ fn test_limit_locals_global() { Err: PrepareError: Too many locals declared in the contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 195407463 used gas 195407463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 839345673 used gas 839345673 Err: PrepareError: Too many locals declared in the contract. 
"#]], ]); @@ -367,7 +367,7 @@ fn test_limit_locals_global() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 139269213 used gas 139269213 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13001413761 used gas 13001413761 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 13419362816 used gas 13419362816 "#]] ]); } @@ -397,7 +397,7 @@ pub fn test_stabilized_host_function() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 7143010623 used gas 7143010623 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 7149592671 used gas 7149592671 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 7226376631 used gas 7226376631 "#]], ]); } @@ -422,7 +422,7 @@ fn test_sandbox_only_function() { #[cfg(not(feature = "sandbox"))] tb.expect(&expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 57337713 used gas 57337713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 145464758 used gas 145464758 Err: ... "#]]); } @@ -444,7 +444,7 @@ fn extension_saturating_float_to_int() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 48450963 used gas 48450963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 100803663 used gas 100803663 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -468,7 +468,7 @@ fn extension_signext() { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 58284261 used gas 58284261 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 123725136 used gas 123725136 "#]], ]); } diff --git a/runtime/near-vm-runner/src/tests/regression_tests.rs b/runtime/near-vm-runner/src/tests/regression_tests.rs index b593a3141fb..c7f671942d0 100644 --- a/runtime/near-vm-runner/src/tests/regression_tests.rs +++ b/runtime/near-vm-runner/src/tests/regression_tests.rs @@ -26,7 +26,7 @@ fn memory_size_alignment_issue() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 46411725 used gas 46411725 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 52993773 used gas 52993773 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 90513208 used gas 90513208 "#]], ]); } diff --git a/runtime/near-vm-runner/src/tests/runtime_errors.rs b/runtime/near-vm-runner/src/tests/runtime_errors.rs index 33b49c15b42..2ff936e32b3 100644 --- a/runtime/near-vm-runner/src/tests/runtime_errors.rs +++ b/runtime/near-vm-runner/src/tests/runtime_errors.rs @@ -35,7 +35,7 @@ fn test_infinite_initializer_export_not_found() { Err: MethodNotFound "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 49101213 used gas 49101213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 104071548 used gas 104071548 Err: MethodNotFound "#]], ]); @@ -55,7 +55,7 @@ fn test_simple_contract() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 42815463 used gas 42815463 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 49397511 used gas 49397511 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 79064041 used gas 79064041 "#]], ]); } @@ -76,7 +76,7 @@ fn test_imported_memory() { Err: ... 
"#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 44982963 used gas 44982963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 83374943 used gas 83374943 Err: ... "#]], ]); @@ -94,7 +94,7 @@ fn test_multiple_memories() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 39130713 used gas 39130713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53963978 used gas 53963978 Err: ... "#]], ]); @@ -111,7 +111,7 @@ fn test_export_not_found() { Err: MethodNotFound "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 42815463 used gas 42815463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 72481993 used gas 72481993 Err: MethodNotFound "#]], ]); @@ -139,7 +139,7 @@ fn test_trap_contract() { Err: WebAssembly trap: An `unreachable` opcode was executed. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 50437017 used gas 50437017 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 80976092 used gas 80976092 Err: WebAssembly trap: An `unreachable` opcode was executed. "#]], ]); @@ -166,7 +166,7 @@ fn test_trap_initializer() { Err: WebAssembly trap: An `unreachable` opcode was executed. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53905017 used gas 53905017 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 98404812 used gas 98404812 Err: WebAssembly trap: An `unreachable` opcode was executed. "#]], ]); @@ -197,7 +197,7 @@ fn test_div_by_zero_contract() { Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53166279 used gas 53166279 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 88068079 used gas 88068079 Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. "#]], ]); @@ -228,7 +228,7 @@ fn test_float_to_int_contract() { Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53427273 used gas 53427273 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 92691798 used gas 92691798 Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. "#]], ]); @@ -262,7 +262,7 @@ fn test_indirect_call_to_null_contract() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 56678523 used gas 56678523 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 109031223 used gas 109031223 Err: ... "#]], ]) @@ -298,7 +298,7 @@ fn test_indirect_call_to_wrong_signature_contract() { Err: WebAssembly trap: Call indirect incorrect signature trap. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 61663773 used gas 61663773 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 134085008 used gas 134085008 Err: WebAssembly trap: Call indirect incorrect signature trap. 
"#]] ]) @@ -315,7 +315,7 @@ fn test_wrong_signature_contract() { Err: MethodInvalidSignature "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 43032213 used gas 43032213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 73571288 used gas 73571288 Err: MethodInvalidSignature "#]], ]); @@ -332,7 +332,7 @@ fn test_export_wrong_type() { Err: MethodNotFound "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 41298213 used gas 41298213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 64856928 used gas 64856928 Err: MethodNotFound "#]], ]); @@ -358,7 +358,7 @@ fn test_guest_panic() { Err: Smart contract panicked: explicit guest panic "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 322357878 used gas 322357878 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 381690938 used gas 381690938 Err: Smart contract panicked: explicit guest panic "#]], ]); @@ -375,7 +375,7 @@ fn test_panic_re_export() { )"#, ) .expect(&expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 312352074 used gas 312352074 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 361214594 used gas 361214594 Err: Smart contract panicked: explicit guest panic "#]]); } @@ -395,7 +395,7 @@ fn test_stack_overflow() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 30376143897 used gas 30376143897 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 30418898602 used gas 30418898602 Err: ... "#]], ]); @@ -433,7 +433,7 @@ fn test_stack_instrumentation_protocol_upgrade() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 31767212013 used gas 31767212013 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 31825672528 used gas 31825672528 Err: ... "#]], ]); @@ -468,7 +468,7 @@ fn test_stack_instrumentation_protocol_upgrade() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 29698803429 used gas 29698803429 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 29757263944 used gas 29757263944 Err: ... "#]], ]); @@ -533,7 +533,7 @@ fn test_bad_import_1() { Err: PrepareError: Error happened during instantiation. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 50618463 used gas 50618463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 111696613 used gas 111696613 Err: PrepareError: Error happened during instantiation. "#]], ]); @@ -550,7 +550,7 @@ fn test_bad_import_2() { Err: PrepareError: Error happened during instantiation. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 50184963 used gas 50184963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 109518023 used gas 109518023 Err: PrepareError: Error happened during instantiation. "#]], ]); @@ -575,7 +575,7 @@ fn test_bad_import_3() { Err: ... "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 48234213 used gas 48234213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 99714368 used gas 99714368 Err: ... 
"#]], ]); @@ -584,7 +584,7 @@ fn test_bad_import_3() { #[test] fn test_bad_import_4() { test_builder().wasm(&bad_import_func("env")).opaque_error().expect(&expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 47800713 used gas 47800713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 97535778 used gas 97535778 Err: ... "#]]); } @@ -623,7 +623,7 @@ fn test_bad_many_imports() { )) .opaque_error() .expect(&expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 299447463 used gas 299447463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 1362207273 used gas 1362207273 Err: ... "#]]) } @@ -647,7 +647,7 @@ fn test_external_call_ok() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 320283336 used gas 320283336 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 326865384 used gas 326865384 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 401031709 used gas 401031709 "#]], ]); } @@ -687,7 +687,7 @@ fn test_external_call_indirect() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 328909092 used gas 328909092 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 335491140 used gas 335491140 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 441069085 used gas 441069085 "#]], ]); } @@ -719,7 +719,7 @@ fn test_address_overflow() { Err: WebAssembly trap: Memory out of bounds trap. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 54294273 used gas 54294273 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 97048978 used gas 97048978 Err: WebAssembly trap: Memory out of bounds trap. "#]], ]); @@ -736,7 +736,7 @@ fn test_address_overflow() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 48534981 used gas 48534981 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 55117029 used gas 55117029 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 97871734 used gas 97871734 "#]], ]); } @@ -776,7 +776,7 @@ fn test_nan_sign() { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 54988767 used gas 54988767 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 61570815 used gas 61570815 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 110433335 used gas 110433335 "#]], ]); @@ -793,7 +793,7 @@ fn test_nan_sign() { Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 60748059 used gas 60748059 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 109610579 used gas 109610579 Err: WebAssembly trap: An arithmetic exception, e.g. divided by zero. 
"#]], ]); @@ -885,10 +885,10 @@ mod fix_contract_loading_cost_protocol_upgrade { VMOutcome: balance 4 storage_usage 12 return data None burnt gas 47406987 used gas 47406987 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53989035 used gas 53989035 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 88890835 used gas 88890835 "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 53989035 used gas 53989035 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 88890835 used gas 88890835 "#]], ]); } @@ -898,7 +898,7 @@ mod fix_contract_loading_cost_protocol_upgrade { #[test] fn test_fn_loading_gas_protocol_upgrade_exceed_loading() { let expect = expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 44115963 used gas 44115963 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 79017763 used gas 79017763 Err: Exceeded the prepaid gas. "#]]; let test_after = test_builder().wat(ALMOST_TRIVIAL_CONTRACT); @@ -919,7 +919,7 @@ mod fix_contract_loading_cost_protocol_upgrade { #[test] fn test_fn_loading_gas_protocol_upgrade_exceed_executing() { let expect = expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 45000000 used gas 45000000 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 79901800 used gas 79901800 Err: Exceeded the prepaid gas. "#]]; let test_after = test_builder().wat(ALMOST_TRIVIAL_CONTRACT); @@ -956,7 +956,7 @@ mod fix_contract_loading_cost_protocol_upgrade { Err: PrepareError: Error happened while deserializing the module. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 39347463 used gas 39347463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 55053273 used gas 55053273 Err: PrepareError: Error happened while deserializing the module. "#]], ]); @@ -970,7 +970,7 @@ mod fix_contract_loading_cost_protocol_upgrade { Err: PrepareError: Error happened during instantiation. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 48234213 used gas 48234213 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 99714368 used gas 99714368 Err: PrepareError: Error happened during instantiation. "#]], ]); @@ -984,7 +984,7 @@ mod fix_contract_loading_cost_protocol_upgrade { Err: PrepareError: Error happened during instantiation. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 47800713 used gas 47800713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 97535778 used gas 97535778 Err: PrepareError: Error happened during instantiation. "#]], ]); @@ -1003,7 +1003,7 @@ mod fix_contract_loading_cost_protocol_upgrade { Err: PrepareError: Too many locals declared in the contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 195407463 used gas 195407463 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 839345673 used gas 839345673 Err: PrepareError: Too many locals declared in the contract. "#]], ]); @@ -1023,7 +1023,7 @@ mod fix_contract_loading_cost_protocol_upgrade { Err: PrepareError: Too many functions in contract. "#]], expect![[r#" - VMOutcome: balance 4 storage_usage 12 return data None burnt gas 19554433713 used gas 19554433713 + VMOutcome: balance 4 storage_usage 12 return data None burnt gas 98129728598 used gas 98129728598 Err: PrepareError: Too many functions in contract. 
"#]], ]); diff --git a/runtime/near-wallet-contract/Cargo.toml b/runtime/near-wallet-contract/Cargo.toml index b73553adfb8..e684935ef46 100644 --- a/runtime/near-wallet-contract/Cargo.toml +++ b/runtime/near-wallet-contract/Cargo.toml @@ -13,10 +13,8 @@ publish = false workspace = true [dependencies] -near-vm-runner.workspace = true - -[dev-dependencies] near-primitives-core.workspace = true +near-vm-runner.workspace = true [build-dependencies] anyhow.workspace = true diff --git a/runtime/near-wallet-contract/build.rs b/runtime/near-wallet-contract/build.rs index 8842a14aed4..b97cbc3fdfe 100644 --- a/runtime/near-wallet-contract/build.rs +++ b/runtime/near-wallet-contract/build.rs @@ -1,36 +1,84 @@ /// This file is run as a part of `cargo build` process and it builds the `Wallet Contract`. /// The generated WASM file is put to the `./res` directory. -use anyhow::{anyhow, Context, Ok, Result}; +use anyhow::{anyhow, Context}; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::process::Command; -#[allow(unreachable_code)] -fn main() -> Result<()> { - // TODO(eth-implicit) Remove this once we have a proper way to generate the Wallet Contract WASM file. - return Ok(()); - build_contract("./wallet-contract", &[], "wallet_contract") +const IMAGE_TAG: &str = "13430592a7be246dd5a29439791f4081e0107ff3"; + +/// See https://chainlist.org/chain/397 +const MAINNET_CHAIN_ID: u64 = 397; + +/// See https://chainlist.org/chain/398 +const TESTNET_CHAIN_ID: u64 = 398; + +/// Not officially registered on chainlist.org because this is for local testing only. +const LOCALNET_CHAIN_ID: u64 = 399; + +fn main() -> anyhow::Result<()> { + let contract_dir = "./implementation"; + + build_contract( + contract_dir, + "eth_wallet_contract", + "wallet_contract_mainnet", + MAINNET_CHAIN_ID, + ) + .context("Mainnet build failed")?; + + build_contract( + contract_dir, + "eth_wallet_contract", + "wallet_contract_testnet", + TESTNET_CHAIN_ID, + ) + .context("Testnet build failed")?; + + build_contract( + contract_dir, + "eth_wallet_contract", + "wallet_contract_localnet", + LOCALNET_CHAIN_ID, + ) + .context("Localnet build failed")?; + + println!("cargo:rerun-if-changed={}", contract_dir); + println!("cargo:rerun-if-changed={}", "./res"); + + Ok(()) } -fn build_contract(dir: &str, args: &[&str], output: &str) -> Result<()> { - let target_dir: PathBuf = - std::env::var("OUT_DIR").context("Failed to read OUT_DIR environment variable")?.into(); +fn build_contract( + dir: &str, + contract_name: &str, + output: &str, + chain_id: u64, +) -> anyhow::Result<()> { + let wasm_target_path = format!("./res/{}.wasm", output); + if Path::new(&wasm_target_path).exists() { + // Skip building if an artifact is already present + return Ok(()); + } - // We place the build artifacts in `target_dir` (workspace's build directory). 
- let mut cmd = cargo_build_cmd(&target_dir); - cmd.args(args); - cmd.current_dir(dir); - run_checking_status(cmd)?; + let absolute_dir = Path::new(dir).canonicalize()?; + + let chain_id_path = absolute_dir.join("wallet-contract/src/CHAIN_ID"); + let chain_id_content = std::fs::read(&chain_id_path).context("Failed to read CHAIN_ID file")?; + + // Update the chain id before building + std::fs::write(&chain_id_path, chain_id.to_string().into_bytes())?; + docker_build(absolute_dir.to_str().expect("path should be valid UTF-8"))?; + + // Restore chain id file to original value after building + std::fs::write(&chain_id_path, chain_id_content)?; let build_artifact_path = - format!("wasm32-unknown-unknown/release/{}.wasm", dir.replace('-', "_")); - let src = target_dir.join(build_artifact_path); - let wasm_target_path = format!("./res/{}.wasm", output); + format!("target/wasm32-unknown-unknown/release/{}.wasm", contract_name); + let src = absolute_dir.join(build_artifact_path); std::fs::copy(&src, &wasm_target_path) .with_context(|| format!("Failed to copy `{}` to `{}`", src.display(), wasm_target_path))?; - - println!("cargo:rerun-if-changed={}", dir); Ok(()) } @@ -39,25 +87,25 @@ fn build_contract(dir: &str, args: &[&str], output: &str) -> Result<()> { /// of the global `cargo build` process that already has some flags set. /// `env_remove` invocations will remove these flags from the nested `cargo build` /// process, to avoid unexpected behaviors due to the workspace configurations. -// TODO(eth-implicit) Change it to have a reproducible hash of the WASM file. -// see https://github.com/near/nearcore/pull/10269#discussion_r1430139987. -fn cargo_build_cmd(target_dir: &Path) -> Command { - let mut res = Command::new("cargo"); - - res.env_remove("CARGO_BUILD_RUSTFLAGS"); - res.env_remove("CARGO_ENCODED_RUSTFLAGS"); - res.env_remove("RUSTC_WORKSPACE_WRAPPER"); - - res.env("RUSTFLAGS", "-Dwarnings"); - res.env("CARGO_TARGET_DIR", target_dir); - - res.args(["build", "--target=wasm32-unknown-unknown", "--release"]); - - res -} +fn docker_build(host_path: &str) -> anyhow::Result<()> { + let volume_arg = format!("{host_path}:/host"); + let image_name = format!("nearprotocol/contract-builder:master-{IMAGE_TAG}-amd64"); -fn run_checking_status(mut cmd: Command) -> Result<()> { - cmd.status().with_context(|| format!("Failed to run command `{cmd:?}`")).and_then(|status| { + let mut cmd = Command::new("docker"); + let status = cmd + .args([ + "run", + "--volume", + &volume_arg, + "-w", + "/host", + "-i", + "--rm", + &image_name, + "./docker-entrypoint.sh", + ]) + .status(); + status.with_context(|| format!("Failed to run command `{cmd:?}`")).and_then(|status| { if status.success() { Ok(()) } else { diff --git a/runtime/near-wallet-contract/implementation/Cargo.lock b/runtime/near-wallet-contract/implementation/Cargo.lock new file mode 100644 index 00000000000..4c9e0d51d8f --- /dev/null +++ b/runtime/near-wallet-contract/implementation/Cargo.lock @@ -0,0 +1,5173 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
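For readers following the `build_contract` change above: the CHAIN_ID file is rewritten before the docker build and restored afterwards, but since `docker_build` is fallible, an early `?` return would leave the modified CHAIN_ID on disk. A minimal sketch of a drop-guard variant that restores the file unconditionally; the `ChainIdGuard` helper is hypothetical and not part of this patch:

use std::path::PathBuf;

/// Hypothetical guard: remembers the original CHAIN_ID bytes and restores
/// them when dropped, even if the docker build fails part-way through.
struct ChainIdGuard {
    path: PathBuf,
    original: Vec<u8>,
}

impl ChainIdGuard {
    /// Reads the current file contents, then overwrites it with `chain_id`.
    fn swap(path: PathBuf, chain_id: u64) -> anyhow::Result<Self> {
        let original = std::fs::read(&path)?;
        std::fs::write(&path, chain_id.to_string().into_bytes())?;
        Ok(ChainIdGuard { path, original })
    }
}

impl Drop for ChainIdGuard {
    fn drop(&mut self) {
        // Best-effort restore; errors cannot be propagated out of drop.
        let _ = std::fs::write(&self.path, &self.original);
    }
}

With such a guard, `build_contract` could hold it across the `docker_build` call and let it drop after copying the artifact, keeping the checked-in CHAIN_ID file clean on both success and failure.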
+version = 3 + +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" + +[[package]] +name = "actix" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb72882332b6d6282f428b77ba0358cb2687e61a6f6df6a6d3871e8a177c2d4f" +dependencies = [ + "actix-macros", + "actix-rt", + "actix_derive", + "bitflags 2.5.0", + "bytes", + "crossbeam-channel", + "futures-core", + "futures-sink", + "futures-task", + "futures-util", + "log", + "once_cell", + "parking_lot", + "pin-project-lite", + "smallvec", + "tokio", + "tokio-util 0.7.10", +] + +[[package]] +name = "actix-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" +dependencies = [ + "quote", + "syn 2.0.53", +] + +[[package]] +name = "actix-rt" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28f32d40287d3f402ae0028a9d54bef51af15c8769492826a69d28f81893151d" +dependencies = [ + "futures-core", + "tokio", +] + +[[package]] +name = "actix_derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli 0.28.1", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if 1.0.0", + "cipher 0.4.4", + "cpufeatures", +] + +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.12", + "once_cell", + "version_check", +] + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" + +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" + +[[package]] +name = "arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "arrayref" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" + +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "async-trait" +version = "0.1.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "461abc97219de0eaaf81fe3ef974a540158f3d079c2ab200f891f1a2ef201e85" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "aurora-engine-modexp" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?rev=c03a2d8610cd27a9decb91b3bddb107db2177b29#c03a2d8610cd27a9decb91b3bddb107db2177b29" +dependencies = [ + "hex", + "num", +] + +[[package]] +name = "aurora-engine-precompiles" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?rev=c03a2d8610cd27a9decb91b3bddb107db2177b29#c03a2d8610cd27a9decb91b3bddb107db2177b29" +dependencies = [ + "aurora-engine-modexp", + 
"aurora-engine-sdk", + "aurora-engine-types", + "ethabi", + "evm", + "hex", + "libsecp256k1", + "num", + "ripemd", + "sha2 0.10.8", + "sha3", + "zeropool-bn", +] + +[[package]] +name = "aurora-engine-sdk" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?rev=c03a2d8610cd27a9decb91b3bddb107db2177b29#c03a2d8610cd27a9decb91b3bddb107db2177b29" +dependencies = [ + "aurora-engine-types", + "base64 0.21.7", + "sha2 0.10.8", + "sha3", +] + +[[package]] +name = "aurora-engine-transactions" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?rev=c03a2d8610cd27a9decb91b3bddb107db2177b29#c03a2d8610cd27a9decb91b3bddb107db2177b29" +dependencies = [ + "aurora-engine-precompiles", + "aurora-engine-sdk", + "aurora-engine-types", + "evm", + "rlp", +] + +[[package]] +name = "aurora-engine-types" +version = "1.0.0" +source = "git+https://github.com/aurora-is-near/aurora-engine.git?rev=c03a2d8610cd27a9decb91b3bddb107db2177b29#c03a2d8610cd27a9decb91b3bddb107db2177b29" +dependencies = [ + "base64 0.21.7", + "borsh 1.3.1", + "bs58 0.5.1", + "hex", + "primitive-types 0.12.2", + "rlp", + "serde", + "serde_json", +] + +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "binary-install" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93bff426ff93f3610dd2b946f3eb8cb2d1285ca8682834d43be531a3f93db2ff" +dependencies = [ + "anyhow", + "dirs-next", + "flate2", + "fs2", + "hex", + "is_executable", + "siphasher", + "tar", + "ureq", + "zip 0.6.6", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] 
+ +[[package]] +name = "blake2" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" +dependencies = [ + "crypto-mac", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "borsh" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive 0.9.3", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" +dependencies = [ + "borsh-derive 1.3.1", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "borsh-derive" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" +dependencies = [ + "once_cell", + "proc-macro-crate 3.1.0", + "proc-macro2", + "quote", + "syn 2.0.53", + "syn_derive", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "brownstone" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030ea61398f34f1395ccbeb046fb68c87b631d1f34567fed0f0f11fa35d18d8d" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "sha2 0.10.8", + "tinyvec", +] + +[[package]] +name = "bumpalo" +version = "3.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" +dependencies = [ + "bzip2-sys", + "libc", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "c2-chacha" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d27dae93fe7b1e0424dc57179ac396908c26b035a87234809f5c4dfd1b47dc80" +dependencies = [ + "cipher 0.2.5", + "ppv-lite86", +] + +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-near" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f73eb01da3b6737778d2006645533e75563d1080c64bf714bfb88d3fb0ac09b" +dependencies = [ + "anyhow", + "atty", + "bs58 0.4.0", + "camino", + "cargo_metadata", + "clap 3.2.25", + "colored", + "env_logger", + "libloading", + "log", + "near-abi", + "rustc_version", + "schemars", + "serde_json", + "sha2 0.10.8", + "symbolic-debuginfo", + "zstd", +] + +[[package]] +name = "cargo-platform" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + +[[package]] +name = "cc" +version = "1.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +dependencies = [ + "jobserver", + "libc", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + +[[package]] +name = "chrono" 
+version = "0.4.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.4", +] + +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_derive 3.2.25", + "clap_lex 0.2.4", + "indexmap 1.9.3", + "once_cell", + "strsim 0.10.0", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap" +version = "4.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "949626d00e063efc93b6dca932419ceb5432f99769911c0b995f7e884c778813" +dependencies = [ + "clap_builder", + "clap_derive 4.5.3", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", + "clap_lex 0.7.0", + "strsim 0.11.0", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "clap_derive" +version = "4.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90239a040c80f5e14809ca132ddc4176ab33d5e17e49691793296e3fcb34d72f" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "platforms", + "rand_core 0.6.4", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "darling" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 2.0.53", +] + +[[package]] +name = "darling_macro" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "debugid" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ee87af31d84ef885378aebca32be3d682b0e0dc119d5b4860a2c5bb5046730" +dependencies = [ + "uuid", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dmsort" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0bc8fbe9441c17c9f46f75dfe27fa1ddb6c68a461ccaed0481419219d4f10d3" + +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + +[[package]] +name = "easy-ext" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53aff6fdc1b181225acdcb5b14c47106726fd8e486707315b1b138baed68ee31" + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "sha2 0.10.8", + "subtle", +] + +[[package]] +name = "either" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" + +[[package]] +name = "elementtree" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6319c9433cf1e95c60c8533978bccf0614f27f03bb4e514253468eeeaa7fe3" +dependencies = [ + "string_cache", + "xml-rs", +] + +[[package]] +name = "encoding_rs" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "eth-address-registrar" +version = "0.1.0" +dependencies = [ + "hex", + "near-sdk", + "serde", +] + +[[package]] +name = "eth-wallet-contract" +version = "0.1.0" +dependencies = [ + "anyhow", + "aurora-engine-transactions", + "aurora-engine-types", + "base64 0.21.7", + "ethabi", + "hex", + "near-crypto 0.21.2", + "near-sdk", + "near-workspaces", + "once_cell", + "rlp", + "serde", + "serde_json", + "sha3", + "tokio", +] + +[[package]] +name = "ethabi" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" +dependencies = [ + "ethereum-types", + "hex", + "sha3", +] + +[[package]] +name = "ethbloom" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +dependencies = [ + "crunchy", + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "scale-info", + "tiny-keccak", +] + +[[package]] +name = "ethereum" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a89fb87a9e103f71b903b80b670200b54cc67a07578f070681f1fffb7396fb7" +dependencies = [ + "bytes", + "ethereum-types", + "hash-db", + "hash256-std-hasher", + "rlp", + "sha3", + "triehash", +] + +[[package]] +name = "ethereum-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +dependencies = [ + "ethbloom", + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "primitive-types 0.12.2", + "scale-info", + "uint", +] + +[[package]] +name = "evm" +version = "0.39.1" +source = 
"git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "auto_impl", + "ethereum", + "evm-core", + "evm-gasometer", + "evm-runtime", + "log", + "primitive-types 0.12.2", + "rlp", + "sha3", +] + +[[package]] +name = "evm-core" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "primitive-types 0.12.2", +] + +[[package]] +name = "evm-gasometer" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "evm-core", + "evm-runtime", + "primitive-types 0.12.2", +] + +[[package]] +name = "evm-runtime" +version = "0.39.1" +source = "git+https://github.com/aurora-is-near/sputnikvm.git?tag=v0.39.1#0334a09d6b6e83ff3a8da992e33f29ba95e0c9fe" +dependencies = [ + "auto_impl", + "evm-core", + "primitive-types 0.12.2", + "sha3", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fiat-crypto" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" + +[[package]] +name = "filetime" +version = "0.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "windows-sys 0.52.0", +] + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "static_assertions", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "flate2" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = 
"form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + 
+[[package]] +name = "gimli" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +dependencies = [ + "fallible-iterator", + "stable_deref_trait", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "goblin" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7666983ed0dd8d21a6f6576ee00053ca0926fb281a5522577a4dbd0f1b54143" +dependencies = [ + "log", + "plain", + "scroll 0.11.0", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 2.2.5", + "slab", + "tokio", + "tokio-util 0.7.10", + "tracing", +] + +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fa0ae458eb99874f54c09f4f9174f8b45fb87e854536a4e608696247f0c23" + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array", + "hmac 0.8.1", +] + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "indent_write" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cfe9645a18782869361d9c8732246be7b410ad4e919d3609ebabdac00ba12c3" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +dependencies = [ + "equivalent", + "hashbrown 0.14.4", + "serde", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "is_executable" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302d553b8abc8187beb7d663e34c065ac4570b273bc9511a50e940e99409c577" +dependencies = [ + "winapi", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "jobserver" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +dependencies = [ + "libc", +] + +[[package]] +name = "joinery" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72167d68f5fce3b8655487b8038691a3c9984ee769590f93f2a631f4ad64e4f5" + +[[package]] +name = "js-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json-patch" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ff1e1486799e3f64129f8ccad108b38290df9cd7015cd31bed17239f0789d6" +dependencies = [ + "serde", + "serde_json", + "thiserror", + "treediff", +] + +[[package]] +name = "json_comments" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dbbfed4e59ba9750e15ba154fdfd9329cee16ff3df539c2666b70f58cc32105" + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.5.0", + "libc", + "redox_syscall", +] + +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "memmap2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memory_units" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "near-abi" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"885db39b08518fa700b73fa2214e8adbbfba316ba82dd510f50519173eadaf73" +dependencies = [ + "borsh 0.9.3", + "schemars", + "semver", + "serde", +] + +[[package]] +name = "near-account-id" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35cbb989542587b47205e608324ddd391f0cee1c22b4b64ae49f458334b95907" +dependencies = [ + "borsh 1.3.1", + "serde", +] + +[[package]] +name = "near-chain-configs" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e5a8ace81c09d7eb165dffc1742358a021b2fa761f2160943305f83216003" +dependencies = [ + "anyhow", + "bytesize", + "chrono", + "derive_more", + "near-config-utils 0.20.1", + "near-crypto 0.20.1", + "near-parameters", + "near-primitives", + "num-rational 0.3.2", + "once_cell", + "serde", + "serde_json", + "sha2 0.10.8", + "smart-default", + "tracing", +] + +[[package]] +name = "near-config-utils" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ae1eaab1d545a9be7a55b6ef09f365c2017f93a03063547591d12c0c6d27e58" +dependencies = [ + "anyhow", + "json_comments", + "thiserror", + "tracing", +] + +[[package]] +name = "near-config-utils" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1c9ff519efa8c778d341fa34971dee93e8adf4e8ae51feaefaa63bdf7e496a" +dependencies = [ + "anyhow", + "json_comments", + "thiserror", + "tracing", +] + +[[package]] +name = "near-crypto" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2991d2912218a80ec0733ac87f84fa803accea105611eea209d4419271957667" +dependencies = [ + "blake2", + "borsh 1.3.1", + "bs58 0.4.0", + "c2-chacha", + "curve25519-dalek", + "derive_more", + "ed25519-dalek", + "hex", + "near-account-id", + "near-config-utils 0.20.1", + "near-stdx 0.20.1", + "once_cell", + "primitive-types 0.10.1", + "rand 0.7.3", + "secp256k1", + "serde", + "serde_json", + "subtle", + "thiserror", +] + +[[package]] +name = "near-crypto" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d927e95742aea981b9fd60996fbeba3b61e90acafd54c2c3c2a4ed40065ff03" +dependencies = [ + "blake2", + "borsh 1.3.1", + "bs58 0.4.0", + "c2-chacha", + "curve25519-dalek", + "derive_more", + "ed25519-dalek", + "hex", + "near-account-id", + "near-config-utils 0.21.2", + "near-stdx 0.21.2", + "once_cell", + "primitive-types 0.10.1", + "rand 0.7.3", + "secp256k1", + "serde", + "serde_json", + "subtle", + "thiserror", +] + +[[package]] +name = "near-fmt" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7d998dfc1e04001608899b2498ad5a782c7d036b73769d510de21964db99286" +dependencies = [ + "near-primitives-core", +] + +[[package]] +name = "near-gas" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14e75c875026229902d065e4435804497337b631ec69ba746b102954273e9ad1" +dependencies = [ + "borsh 1.3.1", + "schemars", + "serde", +] + +[[package]] +name = "near-jsonrpc-client" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18ad81e015f7aced8925d5b9ba3f369b36da9575c15812cfd0786bc1213284ca" +dependencies = [ + "borsh 1.3.1", + "lazy_static", + "log", + "near-chain-configs", + "near-crypto 0.20.1", + "near-jsonrpc-primitives", + "near-primitives", + "reqwest", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "near-jsonrpc-primitives" 
+version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0ce745e954ae776eef05957602e638ee9581106a3675946fb43c2fe7e38ef03" +dependencies = [ + "arbitrary", + "near-chain-configs", + "near-crypto 0.20.1", + "near-primitives", + "near-rpc-error-macro", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "near-o11y" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d20762631bc8253030013bbae9b5f0542691edc1aa6722f1e8141cc9b928ae5b" +dependencies = [ + "actix", + "base64 0.21.7", + "clap 4.5.3", + "near-crypto 0.20.1", + "near-fmt", + "near-primitives-core", + "once_cell", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "prometheus", + "serde", + "serde_json", + "strum 0.24.1", + "thiserror", + "tokio", + "tracing", + "tracing-appender", + "tracing-opentelemetry", + "tracing-subscriber", +] + +[[package]] +name = "near-parameters" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9f16a59b6c3e69b0585be951af6fe42a0ba86c0e207cb8c63badd19efd16680" +dependencies = [ + "assert_matches", + "borsh 1.3.1", + "enum-map", + "near-account-id", + "near-primitives-core", + "num-rational 0.3.2", + "serde", + "serde_repr", + "serde_yaml", + "strum 0.24.1", + "thiserror", +] + +[[package]] +name = "near-primitives" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0462b067732132babcc89d5577db3bfcb0a1bcfbaaed3f2db4c11cd033666314" +dependencies = [ + "arbitrary", + "base64 0.21.7", + "borsh 1.3.1", + "bytesize", + "cfg-if 1.0.0", + "chrono", + "derive_more", + "easy-ext", + "enum-map", + "hex", + "near-crypto 0.20.1", + "near-fmt", + "near-o11y", + "near-parameters", + "near-primitives-core", + "near-rpc-error-macro", + "near-stdx 0.20.1", + "near-vm-runner", + "num-rational 0.3.2", + "once_cell", + "primitive-types 0.10.1", + "rand 0.8.5", + "rand_chacha 0.3.1", + "reed-solomon-erasure", + "serde", + "serde_json", + "serde_with", + "serde_yaml", + "sha3", + "smart-default", + "strum 0.24.1", + "thiserror", + "time", + "tracing", +] + +[[package]] +name = "near-primitives-core" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8443eb718606f572c438be6321a097a8ebd69f8e48d953885b4f16601af88225" +dependencies = [ + "arbitrary", + "base64 0.21.7", + "borsh 1.3.1", + "bs58 0.4.0", + "derive_more", + "enum-map", + "near-account-id", + "num-rational 0.3.2", + "serde", + "serde_repr", + "serde_with", + "sha2 0.10.8", + "strum 0.24.1", + "thiserror", +] + +[[package]] +name = "near-rpc-error-core" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80fca203c51edd9595ec14db1d13359fb9ede32314990bf296b6c5c4502f6ab7" +dependencies = [ + "quote", + "serde", + "syn 2.0.53", +] + +[[package]] +name = "near-rpc-error-macro" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897a445de2102f6732c8a185d922f5e3bf7fd0a41243ce40854df2197237f799" +dependencies = [ + "fs2", + "near-rpc-error-core", + "serde", + "syn 2.0.53", +] + +[[package]] +name = "near-sandbox-utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2de216bb0152bfb64f59016d9e6a5b1ac56dd85f729e5fde08461571e2182c8f" +dependencies = [ + "anyhow", + "binary-install", + "chrono", + "fs2", + "home", + "tokio", +] + +[[package]] +name = "near-sdk" 
+version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c2e7c9524308b1b301cca05d875de13b3b20dc8b92e71f3890b380372e4c88" +dependencies = [ + "base64 0.21.7", + "borsh 1.3.1", + "bs58 0.5.1", + "near-account-id", + "near-gas", + "near-sdk-macros", + "near-sys", + "near-token", + "once_cell", + "serde", + "serde_json", + "wee_alloc", +] + +[[package]] +name = "near-sdk-macros" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e9b23d9d7757ade258921c9cbc7923542e64d9d3b52a6cd91f746c77cb0a0f" +dependencies = [ + "Inflector", + "darling", + "proc-macro2", + "quote", + "serde", + "serde_json", + "strum 0.26.2", + "strum_macros 0.26.2", + "syn 2.0.53", +] + +[[package]] +name = "near-stdx" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "855fd5540e3b4ff6fedf12aba2db1ee4b371b36f465da1363a6d022b27cb43b8" + +[[package]] +name = "near-stdx" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73a697f311c110d0fabae6c8c49ab0a8eff0ec37df82cc860deba92156e77c43" + +[[package]] +name = "near-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397688591acf8d3ebf2c2485ba32d4b24fc10aad5334e3ad8ec0b7179bfdf06b" + +[[package]] +name = "near-token" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b68f3f8a2409f72b43efdbeff8e820b81e70824c49fee8572979d789d1683fb" +dependencies = [ + "borsh 1.3.1", + "serde", +] + +[[package]] +name = "near-vm-runner" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56c80bdb1954808f59bd36a9112377197b38d424991383bf05f52d0fe2e0da5" +dependencies = [ + "base64 0.21.7", + "borsh 1.3.1", + "ed25519-dalek", + "enum-map", + "memoffset", + "near-crypto 0.20.1", + "near-parameters", + "near-primitives-core", + "near-stdx 0.20.1", + "num-rational 0.3.2", + "once_cell", + "prefix-sum-vec", + "ripemd", + "serde", + "serde_repr", + "serde_with", + "sha2 0.10.8", + "sha3", + "strum 0.24.1", + "thiserror", + "tracing", + "zeropool-bn", +] + +[[package]] +name = "near-workspaces" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e597da87d0c1a722e23efb8c24ae42a0ad99a15f37101dad45c15defb051c1" +dependencies = [ + "async-trait", + "base64 0.21.7", + "bs58 0.5.1", + "cargo-near", + "chrono", + "fs2", + "json-patch", + "libc", + "near-account-id", + "near-crypto 0.20.1", + "near-gas", + "near-jsonrpc-client", + "near-jsonrpc-primitives", + "near-primitives", + "near-sandbox-utils", + "near-token", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "sha2 0.10.8", + "tempfile", + "thiserror", + "tokio", + "tokio-retry", + "tracing", + "url", +] + +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nom-supreme" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aadc66631948f6b65da03be4c4cd8bd104d481697ecbb9bbd65719b1ec60bc9f" +dependencies = [ + 
"brownstone", + "indent_write", + "joinery", + "memchr", + "nom", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +dependencies = [ + "num-bigint 0.4.4", + "num-complex", + "num-integer", + "num-iter", + "num-rational 0.4.1", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +dependencies = [ + "autocfg", + "num-bigint 0.3.3", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "num-rational" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +dependencies = [ + "autocfg", + "num-bigint 0.4.4", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openssl" +version = "0.10.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +dependencies = [ + "bitflags 2.5.0", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand 0.8.5", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" +dependencies = [ + "async-trait", + "futures", + "futures-util", + "http", + "opentelemetry", + "prost", + "thiserror", + "tokio", + "tonic", + "tonic-build", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parity-scale-codec" +version = "3.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +dependencies = [ + "arrayvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +dependencies = [ + "proc-macro-crate 2.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.48.5", +] + +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.8", +] + +[[package]] +name = "pdb" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13f4d162ecaaa1467de5afbe62d597757b674b51da8bb4e587430c5fdb2af7aa" +dependencies = [ + "fallible-iterator", + "scroll 0.10.2", + "uuid", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap 2.2.5", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "platforms" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + +[[package]] +name = "prefix-sum-vec" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa06bd51638b6e76ac9ba9b6afb4164fa647bd2916d722f2623fbb6d1ed8bdba" + +[[package]] +name = "primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +dependencies = [ + "fixed-hash 0.7.0", + "uint", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash 0.8.0", + "impl-codec", + "impl-rlp", + "impl-serde", + "scale-info", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.1", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prometheus" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +dependencies = [ + "cfg-if 1.0.0", + "fnv", + 
"lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror", +] + +[[package]] +name = "prost" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +dependencies = [ + "bytes", + "heck 0.3.3", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost", + "prost-types", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +dependencies = [ + "bytes", + "prost", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.12", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_users" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +dependencies = [ + "getrandom 0.2.12", + "libredox", + "thiserror", +] + +[[package]] +name = "reed-solomon-erasure" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" +dependencies = [ + "smallvec", +] + +[[package]] +name = "regex" +version = "1.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.6", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "getrandom 0.2.12", + "libc", + "spin 0.9.8", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rlp-derive", + "rustc-hex", +] + +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.38.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pki-types" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ryu" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" + +[[package]] +name = "scale-info" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e" +dependencies = [ + "cfg-if 1.0.0", + "derive_more", + "parity-scale-codec", + "scale-info-derive", +] + +[[package]] +name = "scale-info-derive" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scroll" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "secp256k1" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" +dependencies = [ + "rand 0.8.5", + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +dependencies = [ + "cc", +] + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "serde_json" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +dependencies = [ + "base64 0.21.7", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.2.5", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "serde_yaml" +version = "0.9.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0623d197252096520c6f2a5e1171ee436e5af99a5d7caa2891e55e61950e6d9" +dependencies = [ + "indexmap 2.2.5", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = 
"signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" + +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" + +[[package]] +name = "smart-default" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133659a15339456eeeb07572eb02a91c91e9815e9cbc89566944d2c8d3efdbf6" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "socket2" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot", + "phf_shared", + "precomputed-hash", + "serde", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", 
+ "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "strum_macros" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.53", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "symbolic-common" +version = "8.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f551f902d5642e58039aee6a9021a61037926af96e071816361644983966f540" +dependencies = [ + "debugid", + "memmap2", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-debuginfo" +version = "8.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165dabf9fc1d6bb6819c2c0e27c8dd0e3068d2c53cf186d319788e96517f0d6" +dependencies = [ + "bitvec", + "dmsort", + "elementtree", + "fallible-iterator", + "flate2", + "gimli 0.26.2", + "goblin", + "lazy_static", + "lazycell", + "nom", + "nom-supreme", + "parking_lot", + "pdb", + "regex", + "scroll 0.11.0", + "serde", + "serde_json", + "smallvec", + "symbolic-common", + "thiserror", + "wasmparser", + "zip 0.5.13", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tar" +version = "0.4.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand", + "rustix", + "windows-sys 0.52.0", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" + +[[package]] +name = "thiserror" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand 0.8.5", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.2.5", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.2.5", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.5", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tonic" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" +dependencies = [ + "async-stream", + "async-trait", + "base64 0.13.1", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "prost-derive", + "tokio", + "tokio-stream", + "tokio-util 0.6.10", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + +[[package]] +name = "tonic-build" +version = "0.6.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" +dependencies = [ + "proc-macro2", + "prost-build", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util 0.7.10", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" +dependencies = [ + "once_cell", + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log 0.1.4", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + 
"once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log 0.2.0", +] + +[[package]] +name = "treediff" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d127780145176e2b5d16611cc25a900150e86e9fd79d3bde6ff3a37359c9cb5" +dependencies = [ + "serde_json", +] + +[[package]] +name = "triehash" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" +dependencies = [ + "hash-db", + "rlp", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "ureq" +version = "2.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11f214ce18d8b2cbe84ed3aa6486ed3f5b285cf8d8fbdbce9f3f767a724adc35" +dependencies = [ + "base64 0.21.7", + "flate2", + "log", + "once_cell", + "rustls", + "rustls-pki-types", + "rustls-webpki", + "url", + "webpki-roots", +] + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.53", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "wasmparser" +version = "0.83.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "wee_alloc" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "memory_units", + "winapi", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.4", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.4", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if 1.0.0", + "windows-sys 0.48.0", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "xattr" +version = 
"1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" +dependencies = [ + "libc", + "linux-raw-sys", + "rustix", +] + +[[package]] +name = "xml-rs" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + +[[package]] +name = "zeropool-bn" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e61de68ede9ffdd69c01664f65a178c5188b73f78faa21f0936016a888ff7c" +dependencies = [ + "byteorder", + "crunchy", + "lazy_static", + "rand 0.8.5", + "rustc-hex", +] + +[[package]] +name = "zip" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +dependencies = [ + "byteorder", + "crc32fast", + "flate2", + "thiserror", +] + +[[package]] +name = "zip" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" +dependencies = [ + "aes", + "byteorder", + "bzip2", + "constant_time_eq", + "crc32fast", + "crossbeam-utils", + "flate2", + "hmac 0.12.1", + "pbkdf2", + "sha1", + "time", + "zstd", +] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/runtime/near-wallet-contract/implementation/Cargo.toml b/runtime/near-wallet-contract/implementation/Cargo.toml new file mode 100644 index 00000000000..70ef37a20d6 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/Cargo.toml @@ -0,0 +1,37 @@ +[workspace.package] +authors = ["Aurora Labs "] +version = "0.1.0" +edition = "2021" +homepage = "https://github.com/aurora-is-near/eth-wallet-contract" +repository = "https://github.com/aurora-is-near/eth-wallet-contract" +license = "CC0-1.0" + + +[workspace.dependencies] +aurora-engine-transactions = { git = "https://github.com/aurora-is-near/aurora-engine.git", rev = "c03a2d8610cd27a9decb91b3bddb107db2177b29", default-features = false, features = ["contract"]} +base64 = "0.21" +ethabi = { version = "18", default-features = false } +hex = "0.4" +near-sdk = { version = "5.0" } +once_cell = "1.18" +serde = { version = "1", features = ["derive"] } + +# dev-dependencies +anyhow = "1" +aurora-engine-types = { git = "https://github.com/aurora-is-near/aurora-engine.git", rev = "c03a2d8610cd27a9decb91b3bddb107db2177b29", default-features = false } +near-crypto = "0.21" +near-workspaces = "0.10" +rlp = { version = "0.5", default-features = false } +serde_json = "1" 
+sha3 = "0.10" +tokio = { version = "1", features = ["full"] } + +[workspace] +resolver = "2" +members = [ + "address-registrar", + "wallet-contract", +] + +[profile.release] +panic = 'abort' diff --git a/runtime/near-wallet-contract/implementation/address-registrar/Cargo.toml b/runtime/near-wallet-contract/implementation/address-registrar/Cargo.toml new file mode 100644 index 00000000000..bdc768e8431 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/address-registrar/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "eth-address-registrar" +version.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +hex.workspace = true +near-sdk.workspace = true +serde.workspace = true diff --git a/runtime/near-wallet-contract/implementation/address-registrar/src/lib.rs b/runtime/near-wallet-contract/implementation/address-registrar/src/lib.rs new file mode 100644 index 00000000000..c37e6ab8e96 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/address-registrar/src/lib.rs @@ -0,0 +1,90 @@ +use near_sdk::{ + borsh::{BorshDeserialize, BorshSerialize}, + env, near_bindgen, + store::{lookup_map::Entry, LookupMap}, + AccountId, BorshStorageKey, PanicOnDefault, +}; + +type Address = [u8; 20]; + +#[derive(BorshSerialize, BorshStorageKey)] +#[borsh(crate = "near_sdk::borsh")] +enum StorageKey { + Addresses, +} + +#[near_bindgen] +#[derive(PanicOnDefault, BorshDeserialize, BorshSerialize)] +#[borsh(crate = "near_sdk::borsh")] +pub struct AddressRegistrar { + pub addresses: LookupMap, +} + +#[near_bindgen] +impl AddressRegistrar { + #[init] + pub fn new() -> Self { + Self { addresses: LookupMap::new(StorageKey::Addresses) } + } + + /// Computes the address associated with the given `account_id` and + /// attempts to store the mapping `address -> account_id`. If there is + /// a collision where the given `account_id` has the same address as a + /// previously registered one then the mapping is NOT updated and `None` + /// is returned. Otherwise, the mapping is stored and the address is + /// returned as a hex-encoded string with `0x` prefix. + pub fn register(&mut self, account_id: AccountId) -> Option { + let address = account_id_to_address(&account_id); + + match self.addresses.entry(address) { + Entry::Vacant(entry) => { + let address = format!("0x{}", hex::encode(address)); + let log_message = format!("Added entry {} -> {}", address, account_id); + entry.insert(account_id); + env::log_str(&log_message); + Some(address) + } + Entry::Occupied(entry) => { + let log_message = format!( + "Address collision between {} and {}. Keeping the former.", + entry.get(), + account_id + ); + env::log_str(&log_message); + None + } + } + } + + /// Attempt to look up the account ID associated with the given address. + /// If an entry for that address is found then the associated account id + /// is returned, otherwise `None` is returned. Use the `register` method + /// to add entries to the map. + /// This function will panic if the given address is not the hex-encoding + /// of a 20-byte array. The `0x` prefix is optional. 
+    pub fn lookup(&self, address: String) -> Option<AccountId> {
+        let address = {
+            let mut buf = [0u8; 20];
+            hex::decode_to_slice(address.strip_prefix("0x").unwrap_or(&address), &mut buf)
+                .unwrap_or_else(|_| env::panic_str("Invalid hex encoding"));
+            buf
+        };
+        self.addresses.get(&address).cloned()
+    }
+
+    /// Computes the address associated with the given `account_id` and
+    /// returns it as a hex-encoded string with `0x` prefix. This function
+    /// does not update the mapping stored in this contract. If you want
+    /// to register an account ID use the `register` method.
+    pub fn get_address(&self, account_id: AccountId) -> String {
+        let address = account_id_to_address(&account_id);
+        format!("0x{}", hex::encode(address))
+    }
+}
+
+fn account_id_to_address(account_id: &AccountId) -> Address {
+    let hash = near_sdk::env::keccak256_array(account_id.as_bytes());
+    let mut result = [0u8; 20];
+    result.copy_from_slice(&hash[12..32]);
+    result
+}
diff --git a/runtime/near-wallet-contract/implementation/docker-entrypoint.sh b/runtime/near-wallet-contract/implementation/docker-entrypoint.sh
new file mode 100755
index 00000000000..1cd67d3997e
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/docker-entrypoint.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+RUSTFLAGS='-C link-arg=-s' cargo build --release --target wasm32-unknown-unknown
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/Cargo.toml b/runtime/near-wallet-contract/implementation/wallet-contract/Cargo.toml
new file mode 100644
index 00000000000..fc8e36dbd84
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "eth-wallet-contract"
+version.workspace = true
+edition.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[dependencies]
+aurora-engine-transactions.workspace = true
+base64.workspace = true
+ethabi.workspace = true
+hex.workspace = true
+near-sdk.workspace = true
+once_cell.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+
+[dev-dependencies]
+anyhow.workspace = true
+aurora-engine-types.workspace = true
+near-crypto.workspace = true
+near-workspaces.workspace = true
+rlp.workspace = true
+sha3.workspace = true
+tokio.workspace = true
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/ADDRESS_REGISTRAR_ACCOUNT_ID b/runtime/near-wallet-contract/implementation/wallet-contract/src/ADDRESS_REGISTRAR_ACCOUNT_ID
new file mode 100644
index 00000000000..dcdba225d15
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/ADDRESS_REGISTRAR_ACCOUNT_ID
@@ -0,0 +1 @@
+address-map.near
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/CHAIN_ID b/runtime/near-wallet-contract/implementation/wallet-contract/src/CHAIN_ID
new file mode 100644
index 00000000000..9a2941ae67a
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/CHAIN_ID
@@ -0,0 +1,8 @@
+{
+    // The contents of this file are set by the build file in `near-wallet-contract`.
+    // The purpose is to factor out the hard-coded EVM chain ID so that it can be set for
+    // each network (mainnet, testnet, etc). The value included in this file is the
+    // [one for mainnet](https://chainlist.org/chain/397). The one for testnet is
+    // [398](https://chainlist.org/chain/398).
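+    // (Any transaction signed for a different chain ID is rejected by the
+    // wallet contract with `RelayerError::InvalidChainId`.)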
+    397
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/error.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/error.rs
new file mode 100644
index 00000000000..17aa61f8325
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/error.rs
@@ -0,0 +1,167 @@
+use std::fmt;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum Error {
+    AccountNonceExhausted,
+    AccountId(AccountIdError),
+    Relayer(RelayerError),
+    User(UserError),
+    Caller(CallerError),
+}
+
+/// Errors that should never happen when the Eth Implicit accounts feature
+/// is available on Near. These errors relate to parsing a 20-byte address
+/// from a Near account ID.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum AccountIdError {
+    AccountIdTooShort,
+    Missing0xPrefix,
+    InvalidHex,
+}
+
+/// Errors which should never happen if the relayer is honest.
+/// If these errors happen then we should ban the relayer (revoke their access key).
+/// An external caller (as opposed to a relayer with a Function Call access key) may
+/// also trigger these errors by passing bad arguments, but this is not an issue
+/// (there is no ban list for external callers) because they are paying the gas fees
+/// for their own mistakes.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum RelayerError {
+    /// Relayers should always check the nonce before sending
+    InvalidNonce,
+    /// Relayers should always encode the transaction correctly
+    InvalidBase64,
+    /// Relayers should always send valid transactions
+    TxParsing(aurora_engine_transactions::Error),
+    /// Relayers should always send correctly signed transactions
+    InvalidSender,
+    /// Relayers should always give the correct target account
+    InvalidTarget,
+    /// Relayers should always check the transaction is signed with the correct chain id.
+    InvalidChainId,
+}
+
+/// Errors that arise from problems in the data signed by the user
+/// (i.e. in the Ethereum transaction itself). A careful power-user
+/// should never see these errors because they can review the data
+/// they are signing. If a user does see these errors then there is
+/// likely a bug in the front-end code that is constructing the Ethereum
+/// transaction to be signed.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum UserError {
+    EvmDeployDisallowed,
+    ValueTooLarge,
+    UnknownPublicKeyKind,
+    InvalidEd25519Key,
+    InvalidSecp256k1Key,
+    InvalidAccessKeyAccountId,
+    UnsupportedAction(UnsupportedAction),
+    UnknownFunctionSelector,
+    InvalidAbiEncodedData,
+    ExcessYoctoNear,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum UnsupportedAction {
+    AddFullAccessKey,
+    CreateAccount,
+    Delegate,
+    DeleteAccount,
+    DeployContract,
+    Stake,
+}
+
+/// Errors that arise from external accounts calling the Wallet Contract.
+/// The `rlp_execute` function is intentionally public so that any account
+/// can pay for the fees on behalf of a Wallet Contract key holder.
+/// These errors are not a big deal from the perspective of the Wallet Contract
+/// because the cost of executing such erroneous transactions is paid
+/// by that external caller.
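+/// (The attached-deposit check that produces these errors is performed by
+/// `validate_tx_value` in `internal.rs`.)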
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum CallerError {
+    InsufficientAttachedValue,
+}
+
+impl From<aurora_engine_transactions::Error> for Error {
+    fn from(value: aurora_engine_transactions::Error) -> Self {
+        Self::Relayer(RelayerError::TxParsing(value))
+    }
+}
+
+impl fmt::Display for AccountIdError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::AccountIdTooShort => f.write_str("Error: account ID too short"),
+            Self::Missing0xPrefix => f.write_str("Error: account ID missing 0x"),
+            Self::InvalidHex => f.write_str("Error: account ID is not valid hex encoding"),
+        }
+    }
+}
+
+impl fmt::Display for RelayerError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::TxParsing(e) => std::write!(f, "Error parsing RLP {}", e.as_str()),
+            Self::InvalidSender => f.write_str("Error: signature is not from account owner"),
+            Self::InvalidBase64 => f.write_str("Error: invalid base64 encoding"),
+            Self::InvalidTarget => {
+                f.write_str("Error: target does not match to in signed transaction")
+            }
+            Self::InvalidNonce => f.write_str("Error: invalid nonce value"),
+            Self::InvalidChainId => f.write_str("Error: invalid chain id value"),
+        }
+    }
+}
+
+impl fmt::Display for UserError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::EvmDeployDisallowed => {
+                f.write_str("Error: transactions deploying EVM contracts not allowed")
+            }
+            Self::ValueTooLarge => {
+                f.write_str("Error: transaction value must be representable by 128 bits")
+            }
+            Self::UnknownPublicKeyKind => f.write_str("Error: unknown public key kind"),
+            Self::InvalidEd25519Key => f.write_str("Error: invalid ED25519 public key"),
+            Self::InvalidSecp256k1Key => f.write_str("Error: invalid SECP256k1 public key"),
+            Self::InvalidAccessKeyAccountId => f.write_str(
+                "Error: attempt to add function call access key with invalid account id",
+            ),
+            Self::UnsupportedAction(a) => {
+                std::write!(f, "Error unsupported action {:?}", a)
+            }
+            Self::UnknownFunctionSelector => f.write_str("Error: unknown function selector"),
+            Self::InvalidAbiEncodedData => {
+                f.write_str("Error: invalid ABI encoding in transaction data")
+            }
+            Self::ExcessYoctoNear => f.write_str(
+                "Error: only at most 1_000_000 yoctoNear can be included directly in an action",
+            ),
+        }
+    }
+}
+
+impl fmt::Display for CallerError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::InsufficientAttachedValue => {
+                f.write_str("Error: external calls must attach Near to pay for their transactions")
+            }
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::AccountNonceExhausted => f.write_str("Error: no nonce values remain"),
+            Self::AccountId(e) => e.fmt(f),
+            Self::Relayer(e) => e.fmt(f),
+            Self::User(e) => e.fmt(f),
+            Self::Caller(e) => e.fmt(f),
+        }
+    }
+}
+
+impl std::error::Error for Error {}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/eth_emulation.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/eth_emulation.rs
new file mode 100644
index 00000000000..4174e92acc3
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/eth_emulation.rs
@@ -0,0 +1,94 @@
+//! This module contains logic for emulating Ethereum standards with the
+//! corresponding Near actions. For now only the ERC-20 standard is supported
+//! (which corresponds to Near's NEP-141).
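+//!
+//! Concretely, a call to the ERC-20 `balanceOf(address)` selector is translated
+//! into a NEP-141 `ft_balance_of` call on the target token contract, and
+//! `transfer(address,uint256)` into an `ft_transfer` call (with 1 yoctoNear
+//! attached, as NEP-141 requires for transfers).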
+
+use crate::{
+    error::{Error, UserError},
+    ethabi_utils,
+    types::{Action, ExecutionContext},
+};
+use aurora_engine_transactions::NormalizedEthTransaction;
+use ethabi::{Address, ParamType};
+use near_sdk::AccountId;
+
+const FIVE_TERA_GAS: u64 = near_sdk::Gas::from_tgas(5).as_gas();
+
+pub const ERC20_BALANCE_OF_SELECTOR: &[u8] = &[0x70, 0xa0, 0x82, 0x31];
+const ERC20_BALANCE_OF_SIGNATURE: [ParamType; 1] = [ParamType::Address];
+
+pub const ERC20_TRANSFER_SELECTOR: &[u8] = &[0xa9, 0x05, 0x9c, 0xbb];
+const ERC20_TRANSFER_SIGNATURE: [ParamType; 2] = [
+    ParamType::Address,   // to
+    ParamType::Uint(256), // value
+];
+
+pub fn try_emulation(
+    target: &AccountId,
+    tx: &NormalizedEthTransaction,
+    context: &ExecutionContext,
+) -> Result<Action, Error> {
+    if tx.data.len() < 4 {
+        return Err(Error::User(UserError::InvalidAbiEncodedData));
+    }
+    // In production eth-implicit accounts are top-level, so this suffix will
+    // always be empty. The purpose of finding a suffix is that it allows for
+    // testing environments where the wallet contract is deployed to an address
+    // that is a sub-account. For example, this allows testing on Near testnet
+    // before the eth-implicit accounts feature is stabilized.
+    // The suffix is only needed in testing.
+    let suffix = context
+        .current_account_id
+        .as_str()
+        .find('.')
+        .map(|index| &context.current_account_id.as_str()[index..])
+        .unwrap_or("");
+    match &tx.data[0..4] {
+        ERC20_BALANCE_OF_SELECTOR => {
+            let (address,): (Address,) =
+                ethabi_utils::abi_decode(&ERC20_BALANCE_OF_SIGNATURE, &tx.data[4..])?;
+            // The account ID is assumed to have the same suffix as the current account because
+            // (1) in production this is correct as all eth-implicit accounts are top-level and
+            // (2) in testing environments where the addresses are sub-accounts, they are still
+            // assumed to all be deployed to the same namespace so that they will all have the
+            // same suffix.
+            let args = format!(r#"{{"account_id": "0x{}{}"}}"#, hex::encode(address), suffix);
+            Ok(Action::FunctionCall {
+                receiver_id: target.to_string(),
+                method_name: "ft_balance_of".into(),
+                args: args.into_bytes(),
+                gas: FIVE_TERA_GAS,
+                yocto_near: 0,
+            })
+        }
+        ERC20_TRANSFER_SELECTOR => {
+            // We intentionally map to `u128` instead of `U256` because the NEP-141 standard
+            // is to use u128.
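+            // Values larger than 128 bits are rejected as invalid ABI data by
+            // the `u128` decoder in `ethabi_utils`.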
+            let (to, value): (Address, u128) =
+                ethabi_utils::abi_decode(&ERC20_TRANSFER_SIGNATURE, &tx.data[4..])?;
+            let args = format!(
+                r#"{{"receiver_id": "0x{}{}", "amount": "{}", "memo": null}}"#,
+                hex::encode(to),
+                suffix,
+                value
+            );
+            Ok(Action::FunctionCall {
+                receiver_id: target.to_string(),
+                method_name: "ft_transfer".into(),
+                args: args.into_bytes(),
+                gas: 2 * FIVE_TERA_GAS,
+                yocto_near: 1,
+            })
+        }
+        _ => Err(Error::User(UserError::UnknownFunctionSelector)),
+    }
+}
+
+#[test]
+fn test_function_selectors() {
+    let balance_of_signature = ethabi::short_signature("balanceOf", &ERC20_BALANCE_OF_SIGNATURE);
+
+    let transfer_signature = ethabi::short_signature("transfer", &ERC20_TRANSFER_SIGNATURE);
+
+    assert_eq!(balance_of_signature, ERC20_BALANCE_OF_SELECTOR); // 0x70a08231
+    assert_eq!(transfer_signature, ERC20_TRANSFER_SELECTOR); // 0xa9059cbb
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/ethabi_utils.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/ethabi_utils.rs
new file mode 100644
index 00000000000..acda0f80a2e
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/ethabi_utils.rs
@@ -0,0 +1,250 @@
+use crate::error::{Error, UserError};
+use ethabi::{ethereum_types::U256, Address, ParamType, Token};
+
+const INVALID_ABI_DATA: Error = Error::User(UserError::InvalidAbiEncodedData);
+
+pub fn abi_decode<T, const N: usize>(types: &[ParamType; N], data: &[u8]) -> Result<T, Error>
+where
+    T: AbiTuple<N>,
+{
+    let tokens = data_to_tokens(types, data)?;
+    T::try_from_token(tokens)
+}
+
+pub trait AbiTuple<const N: usize>: Sized {
+    fn try_from_token(tokens: [Token; N]) -> Result<Self, Error>;
+}
+
+impl<T> AbiTuple<1> for (T,)
+where
+    T: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 1]) -> Result<Self, Error> {
+        let (t,) = tokens.into();
+        T::try_from_token(t).map(|t| (t,))
+    }
+}
+
+impl<T1, T2> AbiTuple<2> for (T1, T2)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 2]) -> Result<Self, Error> {
+        let (t1, t2) = tokens.into();
+        Ok((T1::try_from_token(t1)?, T2::try_from_token(t2)?))
+    }
+}
+
+impl<T1, T2, T3> AbiTuple<3> for (T1, T2, T3)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 3]) -> Result<Self, Error> {
+        let (t1, t2, t3) = tokens.into();
+        Ok((T1::try_from_token(t1)?, T2::try_from_token(t2)?, T3::try_from_token(t3)?))
+    }
+}
+
+impl<T1, T2, T3, T4> AbiTuple<4> for (T1, T2, T3, T4)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+    T4: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 4]) -> Result<Self, Error> {
+        let (t1, t2, t3, t4) = tokens.into();
+        Ok((
+            T1::try_from_token(t1)?,
+            T2::try_from_token(t2)?,
+            T3::try_from_token(t3)?,
+            T4::try_from_token(t4)?,
+        ))
+    }
+}
+
+impl<T1, T2, T3, T4, T5> AbiTuple<5> for (T1, T2, T3, T4, T5)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+    T4: TryFromToken,
+    T5: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 5]) -> Result<Self, Error> {
+        let (t1, t2, t3, t4, t5) = tokens.into();
+        Ok((
+            T1::try_from_token(t1)?,
+            T2::try_from_token(t2)?,
+            T3::try_from_token(t3)?,
+            T4::try_from_token(t4)?,
+            T5::try_from_token(t5)?,
+        ))
+    }
+}
+
+impl<T1, T2, T3, T4, T5, T6> AbiTuple<6> for (T1, T2, T3, T4, T5, T6)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+    T4: TryFromToken,
+    T5: TryFromToken,
+    T6: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 6]) -> Result<Self, Error> {
+        let (t1, t2, t3, t4, t5, t6) = tokens.into();
+        Ok((
+            T1::try_from_token(t1)?,
+            T2::try_from_token(t2)?,
+            T3::try_from_token(t3)?,
+            T4::try_from_token(t4)?,
+            T5::try_from_token(t5)?,
+            T6::try_from_token(t6)?,
+        ))
+    }
+}
+
+impl<T1, T2, T3, T4, T5, T6, T7> AbiTuple<7> for (T1, T2, T3, T4, T5, T6, T7)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+    T4: TryFromToken,
+    T5: TryFromToken,
+    T6: TryFromToken,
+    T7: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 7]) -> Result<Self, Error> {
+        let (t1, t2, t3, t4, t5, t6, t7) = tokens.into();
+        Ok((
+            T1::try_from_token(t1)?,
+            T2::try_from_token(t2)?,
+            T3::try_from_token(t3)?,
+            T4::try_from_token(t4)?,
+            T5::try_from_token(t5)?,
+            T6::try_from_token(t6)?,
+            T7::try_from_token(t7)?,
+        ))
+    }
+}
+
+impl<T1, T2, T3, T4, T5, T6, T7, T8> AbiTuple<8> for (T1, T2, T3, T4, T5, T6, T7, T8)
+where
+    T1: TryFromToken,
+    T2: TryFromToken,
+    T3: TryFromToken,
+    T4: TryFromToken,
+    T5: TryFromToken,
+    T6: TryFromToken,
+    T7: TryFromToken,
+    T8: TryFromToken,
+{
+    fn try_from_token(tokens: [Token; 8]) -> Result<Self, Error> {
+        let (t1, t2, t3, t4, t5, t6, t7, t8) = tokens.into();
+        Ok((
+            T1::try_from_token(t1)?,
+            T2::try_from_token(t2)?,
+            T3::try_from_token(t3)?,
+            T4::try_from_token(t4)?,
+            T5::try_from_token(t5)?,
+            T6::try_from_token(t6)?,
+            T7::try_from_token(t7)?,
+            T8::try_from_token(t8)?,
+        ))
+    }
+}
+
+trait TryFromToken: Sized {
+    fn try_from_token(t: Token) -> Result<Self, Error>;
+}
+
+impl TryFromToken for u8 {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        const U8_MAX: U256 = U256([u8::MAX as u64, 0, 0, 0]);
+        let x = t.into_uint().ok_or(INVALID_ABI_DATA)?;
+        if x <= U8_MAX {
+            Ok(x.low_u32() as u8)
+        } else {
+            Err(INVALID_ABI_DATA)
+        }
+    }
+}
+
+impl TryFromToken for u32 {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        const U32_MAX: U256 = U256([u32::MAX as u64, 0, 0, 0]);
+        let x = t.into_uint().ok_or(INVALID_ABI_DATA)?;
+        if x <= U32_MAX {
+            Ok(x.low_u32())
+        } else {
+            Err(INVALID_ABI_DATA)
+        }
+    }
+}
+
+impl TryFromToken for u64 {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        const U64_MAX: U256 = U256([u64::MAX, 0, 0, 0]);
+        let x = t.into_uint().ok_or(INVALID_ABI_DATA)?;
+        if x <= U64_MAX {
+            Ok(x.low_u64())
+        } else {
+            Err(INVALID_ABI_DATA)
+        }
+    }
+}
+
+impl TryFromToken for u128 {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        const U128_MAX: U256 = U256([u64::MAX, u64::MAX, 0, 0]);
+        let x = t.into_uint().ok_or(INVALID_ABI_DATA)?;
+        if x <= U128_MAX {
+            Ok(x.low_u128())
+        } else {
+            Err(INVALID_ABI_DATA)
+        }
+    }
+}
+
+impl TryFromToken for bool {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        t.into_bool().ok_or(INVALID_ABI_DATA)
+    }
+}
+
+impl TryFromToken for String {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        t.into_string().ok_or(INVALID_ABI_DATA)
+    }
+}
+
+impl TryFromToken for Address {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        t.into_address().ok_or(INVALID_ABI_DATA)
+    }
+}
+
+impl TryFromToken for Vec<u8> {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        t.into_bytes().ok_or(INVALID_ABI_DATA)
+    }
+}
+
+impl TryFromToken for Vec<String> {
+    fn try_from_token(t: Token) -> Result<Self, Error> {
+        let elems = t.into_array().ok_or(INVALID_ABI_DATA)?;
+        elems.into_iter().map(String::try_from_token).collect()
+    }
+}
+
+fn data_to_tokens<const N: usize>(
+    types: &[ParamType; N],
+    data: &[u8],
+) -> Result<[ethabi::Token; N], Error> {
+    let result = ethabi::decode(types.as_slice(), data).map_err(|_| INVALID_ABI_DATA)?;
+    result.try_into().map_err(|_| INVALID_ABI_DATA)
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/internal.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/internal.rs
new file mode 100644
index 00000000000..369d10da8a0
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/internal.rs
@@ -0,0 +1,269 @@
+use crate::{
+    error::{AccountIdError, CallerError, Error, RelayerError, UserError},
+    eth_emulation, ethabi_utils, near_action,
+    types::{
+        Action, ExecutionContext, TransactionValidationOutcome, ADD_KEY_SELECTOR,
+        ADD_KEY_SIGNATURE, DELETE_KEY_SELECTOR, DELETE_KEY_SIGNATURE, FUNCTION_CALL_SELECTOR,
+        FUNCTION_CALL_SIGNATURE, TRANSFER_SELECTOR, TRANSFER_SIGNATURE,
+    },
+};
+use aurora_engine_transactions::{EthTransactionKind, NormalizedEthTransaction};
+use base64::Engine;
+use ethabi::{ethereum_types::U256, Address};
+use near_sdk::{AccountId, NearToken};
+
+// TODO(eth-implicit): Decide on chain id.
+pub const CHAIN_ID: u64 = std::include!("CHAIN_ID");
+const U64_MAX: U256 = U256([u64::MAX, 0, 0, 0]);
+/// Only up to this amount of yoctoNear can be directly mentioned in an action;
+/// the rest should be included in the `value` field of the Ethereum transaction.
+pub const MAX_YOCTO_NEAR: u32 = 1_000_000;
+
+/// The largest accepted `value` field of a transaction.
+/// Computed as `(2**128 - 1) // 1_000_000` since Near balances are
+/// 128-bit numbers, but with 24 decimals instead of 18. So to convert
+/// an Ethereum transaction value into a Near value we need to multiply
+/// by `1_000_000` and then add back any lower digits that were truncated.
+const VALUE_MAX: U256 = U256([10175519178963368024, 18446744073709, 0, 0]);
+
+/// Given an RLP-encoded Ethereum transaction (bytes encoded in base64),
+/// a Near account the transaction is supposed to interact with, the current
+/// account ID, and the current nonce, this function will attempt to transform
+/// the Ethereum transaction into a Near action.
+pub fn parse_rlp_tx_to_action(
+    tx_bytes_b64: &str,
+    target: &AccountId,
+    context: &ExecutionContext,
+    expected_nonce: &mut u64,
+) -> Result<(near_action::Action, TransactionValidationOutcome), Error> {
+    let tx_bytes = decode_b64(tx_bytes_b64)?;
+    let tx_kind: EthTransactionKind = tx_bytes.as_slice().try_into()?;
+    let tx: NormalizedEthTransaction = tx_kind.try_into()?;
+    let validation_outcome = validate_tx_relayer_data(&tx, target, context, *expected_nonce)?;
+
+    // If the transaction is valid then increment the nonce to prevent replay
+    *expected_nonce = expected_nonce.saturating_add(1);
+
+    let to = tx.to.ok_or(Error::User(UserError::EvmDeployDisallowed))?.raw();
+    let action = if to != context.current_address
+        && extract_address(target).map(|a| a == to).unwrap_or(false)
+    {
+        // If target is another Ethereum implicit account then the action
+        // must be a transfer (because EOAs are not contracts on Ethereum).
+        Action::Transfer { receiver_id: target.to_string(), yocto_near: 0 }
+    } else {
+        parse_tx_data(target, &tx, context)?
+    };
+    validate_tx_value(&tx, context, &action)?;
+
+    // Call to `low_u128` here is safe because of the validation done in `validate_tx_value`
+    let near_action = action
+        .try_into_near_action(tx.value.raw().low_u128().saturating_mul(MAX_YOCTO_NEAR.into()))?;
+
+    Ok((near_action, validation_outcome))
+}
+
+/// Extracts a 20-byte address from a Near account ID.
+/// This is done by assuming the account ID is of the form `^0x[0-9a-f]{40}`,
+/// i.e. it starts with `0x` and then hex-encoded 20 bytes.
+pub fn extract_address(current_account_id: &AccountId) -> Result<Address, Error> {
+    let hex_str = current_account_id.as_bytes();
+
+    // The length must be at least 42 characters because it begins with
+    // `0x` and then a 20-byte hex-encoded string. In production it will
+    // In production it will be exactly 42 characters because eth-implicit
+    // accounts will always be top-level, but for testing we may have them
+    // be sub-accounts, in which case the length will be longer than
+    // 42 characters.
+    if hex_str.len() < 42 {
+        return Err(Error::AccountId(AccountIdError::AccountIdTooShort));
+    }
+
+    if &hex_str[0..2] != b"0x" {
+        return Err(Error::AccountId(AccountIdError::Missing0xPrefix));
+    }
+
+    let mut bytes = [0u8; 20];
+    hex::decode_to_slice(&hex_str[2..42], &mut bytes)
+        .map_err(|_| Error::AccountId(AccountIdError::InvalidHex))?;
+
+    Ok(bytes.into())
+}
+
+/// Decode a base-64 encoded string into raw bytes.
+fn decode_b64(input: &str) -> Result<Vec<u8>, Error> {
+    let engine = base64::engine::general_purpose::STANDARD;
+    engine.decode(input).map_err(|_| Error::Relayer(RelayerError::InvalidBase64))
+}
+
+/// Converts any Near account ID into a 20-byte address by taking the last 20 bytes
+/// of the keccak256 hash.
+pub fn account_id_to_address(account_id: &AccountId) -> Address {
+    let hash = keccak256(account_id.as_bytes());
+    let mut result = [0u8; 20];
+    result.copy_from_slice(&hash[12..32]);
+    result.into()
+}
+
+pub fn keccak256(bytes: &[u8]) -> [u8; 32] {
+    #[cfg(test)]
+    {
+        use sha3::{Digest, Keccak256};
+        let hash = Keccak256::digest(bytes);
+        hash.into()
+    }
+
+    #[cfg(not(test))]
+    near_sdk::env::keccak256_array(bytes)
+}
+
+fn parse_tx_data(
+    target: &AccountId,
+    tx: &NormalizedEthTransaction,
+    context: &ExecutionContext,
+) -> Result<Action, Error> {
+    if tx.data.len() < 4 {
+        return Err(Error::User(UserError::InvalidAbiEncodedData));
+    }
+    match &tx.data[0..4] {
+        FUNCTION_CALL_SELECTOR => {
+            let (receiver_id, method_name, args, gas, yocto_near): (String, _, _, _, _) =
+                ethabi_utils::abi_decode(&FUNCTION_CALL_SIGNATURE, &tx.data[4..])?;
+            if target.as_str() != receiver_id.as_str() {
+                return Err(Error::Relayer(RelayerError::InvalidTarget));
+            }
+            if yocto_near > MAX_YOCTO_NEAR {
+                return Err(Error::User(UserError::ExcessYoctoNear));
+            }
+            Ok(Action::FunctionCall { receiver_id, method_name, args, gas, yocto_near })
+        }
+        TRANSFER_SELECTOR => {
+            let (receiver_id, yocto_near): (String, u32) =
+                ethabi_utils::abi_decode(&TRANSFER_SIGNATURE, &tx.data[4..])?;
+            if target.as_str() != receiver_id.as_str() {
+                return Err(Error::Relayer(RelayerError::InvalidTarget));
+            }
+            if yocto_near > MAX_YOCTO_NEAR {
+                return Err(Error::User(UserError::ExcessYoctoNear));
+            }
+            Ok(Action::Transfer { receiver_id, yocto_near })
+        }
+        ADD_KEY_SELECTOR => {
+            let (
+                public_key_kind,
+                public_key,
+                nonce,
+                is_full_access,
+                is_limited_allowance,
+                allowance,
+                receiver_id,
+                method_names,
+            ) = ethabi_utils::abi_decode(&ADD_KEY_SIGNATURE, &tx.data[4..])?;
+            Ok(Action::AddKey {
+                public_key_kind,
+                public_key,
+                nonce,
+                is_full_access,
+                is_limited_allowance,
+                allowance,
+                receiver_id,
+                method_names,
+            })
+        }
+        DELETE_KEY_SELECTOR => {
+            let (public_key_kind, public_key) =
+                ethabi_utils::abi_decode(&DELETE_KEY_SIGNATURE, &tx.data[4..])?;
+            Ok(Action::DeleteKey { public_key_kind, public_key })
+        }
+        _ => eth_emulation::try_emulation(target, tx, context),
+    }
+}
+
+/// Validates that the transaction follows the Wallet Contract protocol.
+/// This includes checking that:
+/// - the from address matches the current account address,
+/// - the to address is present and matches the target address (or the hash of the target account ID),
+/// - the nonce matches the expected nonce.
+/// If this validation fails then the relayer that sent the transaction is faulty and should be banned.
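+///
+/// Illustrative sketch (hypothetical account ids): both of these `to`/`target`
+/// pairings would satisfy the target check below:
+/// ```ignore
+/// // eth-implicit target: `to` equals the address embedded in the account id.
+/// let to = extract_address(&"0xabc…".parse().unwrap())?;
+/// // named target: `to` equals the keccak-derived address of the account id.
+/// let to = account_id_to_address(&"alice.near".parse().unwrap());
+/// ```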
+fn validate_tx_relayer_data(
+    tx: &NormalizedEthTransaction,
+    target: &AccountId,
+    context: &ExecutionContext,
+    expected_nonce: u64,
+) -> Result<TransactionValidationOutcome, Error> {
+    if tx.address.raw() != context.current_address {
+        return Err(Error::Relayer(RelayerError::InvalidSender));
+    }
+
+    if tx.chain_id != Some(CHAIN_ID) {
+        return Err(Error::Relayer(RelayerError::InvalidChainId));
+    }
+
+    let to = tx.to.ok_or(Error::User(UserError::EvmDeployDisallowed))?.raw();
+    let target_as_address = extract_address(target).ok();
+    let to_equals_target = target_as_address.map(|target| to == target).unwrap_or(false);
+
+    // Only valid targets satisfy `to == target` or `to == hash(target)`
+    if !to_equals_target && to != account_id_to_address(target) {
+        return Err(Error::Relayer(RelayerError::InvalidTarget));
+    }
+
+    let nonce = if tx.nonce <= U64_MAX {
+        tx.nonce.low_u64()
+    } else {
+        return Err(Error::Relayer(RelayerError::InvalidNonce));
+    };
+    if nonce != expected_nonce {
+        return Err(Error::Relayer(RelayerError::InvalidNonce));
+    }
+
+    // If `to == target` and this is not a self-transaction then the address must not
+    // be registered in the address registry. The purpose of this check is to prevent
+    // lazy relayers from skipping this check themselves (relayers are supposed to use
+    // the address registry to fill in the `target`).
+    if to_equals_target && to != context.current_address {
+        Ok(TransactionValidationOutcome::AddressCheckRequired(to))
+    } else {
+        Ok(TransactionValidationOutcome::Validated)
+    }
+}
+
+fn validate_tx_value(
+    tx: &NormalizedEthTransaction,
+    context: &ExecutionContext,
+    action: &Action,
+) -> Result<(), Error> {
+    if tx.value.raw() > VALUE_MAX {
+        return Err(Error::User(UserError::ValueTooLarge));
+    }
+
+    let total_value = tx
+        .value
+        .raw()
+        .low_u128()
+        .saturating_mul(MAX_YOCTO_NEAR.into())
+        .saturating_add(action.value().as_yoctonear());
+
+    if total_value > 0 {
+        let is_self_call = context.predecessor_account_id == context.current_account_id;
+        let sufficient_attached_deposit =
+            context.attached_deposit >= NearToken::from_yoctonear(total_value);
+        if !is_self_call && !sufficient_attached_deposit {
+            return Err(Error::Caller(CallerError::InsufficientAttachedValue));
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+fn test_value_max() {
+    assert_eq!(VALUE_MAX, U256::from(u128::MAX / 1_000_000));
+}
+
+#[test]
+fn test_account_id_to_address() {
+    let account_id: AccountId = "aurora".parse().unwrap();
+    let address =
+        Address::from_slice(&hex::decode("4444588443c3a91288c5002483449aba1054192b").unwrap());
+    assert_eq!(account_id_to_address(&account_id), address);
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/lib.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/lib.rs
new file mode 100644
index 00000000000..5e81dde5cad
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/lib.rs
@@ -0,0 +1,244 @@
+use crate::{
+    error::Error,
+    types::{ExecuteResponse, ExecutionContext},
+};
+use error::{UnsupportedAction, UserError};
+use near_sdk::{
+    borsh::{BorshDeserialize, BorshSerialize},
+    env,
+    json_types::U64,
+    near_bindgen, AccountId, Allowance, Gas, GasWeight, NearToken, Promise, PromiseOrValue,
+    PromiseResult,
+};
+use types::TransactionValidationOutcome;
+
+pub mod error;
+pub mod eth_emulation;
+pub mod ethabi_utils;
+pub mod internal;
+pub mod near_action;
+pub mod types;
+
+#[cfg(test)]
+mod tests;
+
+const ADDRESS_REGISTRAR_ACCOUNT_ID: &str = std::include_str!("ADDRESS_REGISTRAR_ACCOUNT_ID");
+
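+// NOTE: the registrar account id is baked in at compile time via `include_str!`.
+// The test harness (`tests/utils/test_context.rs` below) temporarily overwrites
+// this file with the account id of a freshly deployed registrar before building
+// the contract under test.
+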
+#[near_bindgen]
+#[derive(Default, BorshDeserialize, BorshSerialize)]
+#[borsh(crate = "near_sdk::borsh")]
+pub struct WalletContract {
+    pub nonce: u64,
+}
+
+#[near_bindgen]
+impl WalletContract {
+    /// Return the nonce value currently stored in the contract.
+    /// Following the Ethereum protocol, only transactions with nonce equal
+    /// to the current value will be accepted.
+    /// Additionally, the Ethereum protocol requires the nonce of an account to
+    /// increment by 1 each time a transaction with the correct nonce and a valid
+    /// signature is submitted (even if that transaction eventually fails). In this
+    /// way, each nonce value can only be used once (hence the name "nonce") and
+    /// thus transaction replay is prevented.
+    pub fn get_nonce(&self) -> U64 {
+        U64(self.nonce)
+    }
+
+    /// This is the main entry point into this contract. It accepts an RLP-encoded
+    /// Ethereum transaction signed by the private key associated with the address
+    /// for the account where this contract is deployed. RLP is a binary format,
+    /// so the argument is actually passed as a base64-encoded string.
+    /// The Ethereum transaction represents a Near action the owner of the address
+    /// wants to perform. This method decodes that action from the Ethereum transaction
+    /// and creates a promise to perform that action.
+    /// Actions on Near are sent to a particular account ID where they are supposed to
+    /// be executed (for example, a `FunctionCall` action is sent to the contract
+    /// which will execute the method). In an Ethereum transaction only the address
+    /// of the target can be specified because Ethereum does not have a notion of
+    /// named accounts like Near does. The `target` field of this method gives the
+    /// actual account ID that the action will be sent to. The `target` must itself
+    /// be an eth-implicit account and match the `to` address of the Ethereum
+    /// transaction; or `target` must hash to the address given in the `to` field
+    /// of the Ethereum transaction.
+    /// The output of this function is an `ExecuteResponse` which gives the output
+    /// of the Near action or an error message if there was a problem during the execution.
+    #[payable]
+    pub fn rlp_execute(
+        &mut self,
+        target: AccountId,
+        tx_bytes_b64: String,
+    ) -> PromiseOrValue<ExecuteResponse> {
+        let current_account_id = env::current_account_id();
+        let predecessor_account_id = env::predecessor_account_id();
+        let result = inner_rlp_execute(
+            current_account_id.clone(),
+            predecessor_account_id,
+            target,
+            tx_bytes_b64,
+            &mut self.nonce,
+        );
+
+        match result {
+            Ok(promise) => PromiseOrValue::Promise(promise),
+            Err(Error::Relayer(_)) if env::signer_account_id() == current_account_id => {
+                let promise = create_ban_relayer_promise(current_account_id);
+                PromiseOrValue::Promise(promise)
+            }
+            Err(e) => PromiseOrValue::Value(e.into()),
+        }
+    }
+
+    /// Callback after checking if an address is contained in the registrar.
+    /// This check happens when the target is another eth-implicit account, to
+    /// confirm that the relayer really did check for a named account with that address.
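+    /// If the registrar returns `Some(account_id)`, the relayer should have used
+    /// that named account as the `target`, so the transaction is rejected (and a
+    /// self-signed relayer is banned); if it returns `None`, the decoded action
+    /// is dispatched as usual.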
+    #[private]
+    pub fn address_check_callback(
+        &mut self,
+        target: AccountId,
+        action: near_action::Action,
+    ) -> PromiseOrValue<ExecuteResponse> {
+        let maybe_account_id: Option<AccountId> = match env::promise_result(0) {
+            PromiseResult::Failed => {
+                return PromiseOrValue::Value(ExecuteResponse {
+                    success: false,
+                    success_value: None,
+                    error: Some("Call to Address Registrar contract failed".into()),
+                });
+            }
+            PromiseResult::Successful(value) => serde_json::from_slice(&value)
+                .unwrap_or_else(|_| env::panic_str("Unexpected response from account registrar")),
+        };
+        let current_account_id = env::current_account_id();
+        let promise = if maybe_account_id.is_some() {
+            if env::signer_account_id() == current_account_id {
+                create_ban_relayer_promise(current_account_id)
+            } else {
+                return PromiseOrValue::Value(ExecuteResponse {
+                    success: false,
+                    success_value: None,
+                    error: Some("Invalid target: target is address corresponding to existing named account_id".into()),
+                });
+            }
+        } else {
+            let ext = WalletContract::ext(current_account_id).with_unused_gas_weight(1);
+            match action_to_promise(target, action).map(|p| p.then(ext.rlp_execute_callback())) {
+                Ok(p) => p,
+                Err(e) => {
+                    return PromiseOrValue::Value(e.into());
+                }
+            }
+        };
+        PromiseOrValue::Promise(promise)
+    }
+
+    #[private]
+    pub fn rlp_execute_callback(&mut self) -> ExecuteResponse {
+        let n = env::promise_results_count();
+        let mut success_value = None;
+        for i in 0..n {
+            match env::promise_result(i) {
+                PromiseResult::Failed => {
+                    return ExecuteResponse {
+                        success: false,
+                        success_value: None,
+                        error: Some("Failed Near promise".into()),
+                    };
+                }
+                PromiseResult::Successful(value) => success_value = Some(value),
+            }
+        }
+        ExecuteResponse { success: true, success_value, error: None }
+    }
+
+    #[private]
+    pub fn ban_relayer(&mut self) -> ExecuteResponse {
+        ExecuteResponse {
+            success: false,
+            success_value: None,
+            error: Some("Error: faulty relayer".into()),
+        }
+    }
+}
+
+fn inner_rlp_execute(
+    current_account_id: AccountId,
+    predecessor_account_id: AccountId,
+    target: AccountId,
+    tx_bytes_b64: String,
+    nonce: &mut u64,
+) -> Result<Promise, Error> {
+    if *nonce == u64::MAX {
+        return Err(Error::AccountNonceExhausted);
+    }
+    let context = ExecutionContext::new(
+        current_account_id.clone(),
+        predecessor_account_id,
+        env::attached_deposit(),
+    )?;
+
+    let (action, validation_outcome) =
+        internal::parse_rlp_tx_to_action(&tx_bytes_b64, &target, &context, nonce)?;
+    let promise = match validation_outcome {
+        TransactionValidationOutcome::Validated => {
+            let ext = WalletContract::ext(current_account_id).with_unused_gas_weight(1);
+            action_to_promise(target, action)?.then(ext.rlp_execute_callback())
+        }
+        TransactionValidationOutcome::AddressCheckRequired(address) => {
+            let ext = WalletContract::ext(current_account_id).with_unused_gas_weight(1);
+            let address_registrar = {
+                let account_id = ADDRESS_REGISTRAR_ACCOUNT_ID
+                    .trim()
+                    .parse()
+                    .unwrap_or_else(|_| env::panic_str("Invalid address registrar"));
+                ext_registrar::ext(account_id).with_static_gas(Gas::from_tgas(5))
+            };
+            let address = format!("0x{}", hex::encode(address));
+            address_registrar.lookup(address).then(ext.address_check_callback(target, action))
+        }
+    };
+    Ok(promise)
+}
+
+fn action_to_promise(target: AccountId, action: near_action::Action) -> Result<Promise, Error> {
+    match action {
+        near_action::Action::FunctionCall(action) => Ok(Promise::new(target).function_call(
+            action.method_name,
+            action.args,
+            action.deposit,
+            action.gas,
+        )),
+        near_action::Action::Transfer(action) =>
+            Ok(Promise::new(target).transfer(action.deposit)),
+        near_action::Action::AddKey(action) => match action.access_key.permission {
+            near_action::AccessKeyPermission::FullAccess => {
+                Err(Error::User(UserError::UnsupportedAction(UnsupportedAction::AddFullAccessKey)))
+            }
+            near_action::AccessKeyPermission::FunctionCall(access) => Ok(Promise::new(target)
+                .add_access_key_allowance_with_nonce(
+                    action.public_key,
+                    access.allowance.and_then(Allowance::limited).unwrap_or(Allowance::Unlimited),
+                    access.receiver_id,
+                    access.method_names.join(","),
+                    action.access_key.nonce,
+                )),
+        },
+        near_action::Action::DeleteKey(action) => {
+            Ok(Promise::new(target).delete_key(action.public_key))
+        }
+    }
+}
+
+fn create_ban_relayer_promise(current_account_id: AccountId) -> Promise {
+    let pk = env::signer_account_pk();
+    Promise::new(current_account_id).delete_key(pk).function_call_weight(
+        "ban_relayer".into(),
+        Vec::new(),
+        NearToken::from_yoctonear(0),
+        Gas::from_tgas(1),
+        GasWeight(1),
+    )
+}
+
+#[near_sdk::ext_contract(ext_registrar)]
+trait AddressRegistrar {
+    fn lookup(&self, address: String) -> Option<AccountId>;
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/near_action.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/near_action.rs
new file mode 100644
index 00000000000..4b7f9491270
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/near_action.rs
@@ -0,0 +1,61 @@
+//! Partial definition of `Action` for Near protocol.
+//! Unfortunately we cannot use `near-primitives` directly in the contract
+//! because it uses dependencies that do not compile to Wasm (at least
+//! not without some extra feature flags that `near-primitives` currently
+//! does not include).
+//! Some variants of `near_primitives::Action` are intentionally left out
+//! because they are not possible to do with the wallet contract
+//! (e.g. `DeleteAccount`).
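+//!
+//! Illustrative sketch (example values only) of constructing one of these
+//! actions, the way the wallet contract does after decoding a signed
+//! transaction:
+//! ```ignore
+//! let action = Action::FunctionCall(FunctionCallAction {
+//!     method_name: "greet".to_string(),
+//!     args: br#"{"name": "Aurora"}"#.to_vec(),
+//!     gas: Gas::from_tgas(5),
+//!     deposit: NearToken::from_yoctonear(0),
+//! });
+//! ```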
+
+use near_sdk::{AccountId, Gas, NearToken, PublicKey};
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub enum Action {
+    FunctionCall(FunctionCallAction),
+    Transfer(TransferAction),
+    AddKey(AddKeyAction),
+    DeleteKey(DeleteKeyAction),
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct FunctionCallAction {
+    pub method_name: String,
+    pub args: Vec<u8>,
+    pub gas: Gas,
+    pub deposit: NearToken,
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct TransferAction {
+    pub deposit: NearToken,
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct AddKeyAction {
+    pub public_key: PublicKey,
+    pub access_key: AccessKey,
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct AccessKey {
+    pub nonce: u64,
+    pub permission: AccessKeyPermission,
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub enum AccessKeyPermission {
+    FullAccess,
+    FunctionCall(FunctionCallPermission),
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct FunctionCallPermission {
+    pub allowance: Option<NearToken>,
+    pub receiver_id: AccountId,
+    pub method_names: Vec<String>,
+}
+
+#[derive(Debug, serde::Deserialize, serde::Serialize)]
+pub struct DeleteKeyAction {
+    pub public_key: PublicKey,
+}
diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/caller_error.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/caller_error.rs
new file mode 100644
index 00000000000..333ff1db6d2
--- /dev/null
+++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/caller_error.rs
@@ -0,0 +1,105 @@
+//! A suite of tests for code paths handling error cases where the `rlp_execute` function
+//! is called by an external account (as opposed to the Wallet Contract calling itself).
+//! Since `rlp_execute` is public, it must be impossible for an external account to
+//! cause harm to the Wallet Contract by calling this function.
+
+use crate::{
+    error::{CallerError, Error},
+    internal::MAX_YOCTO_NEAR,
+    tests::{
+        utils::{self, codec, test_context::TestContext},
+        RLP_EXECUTE,
+    },
+    types::{Action, ExecuteResponse},
+};
+use aurora_engine_types::types::Wei;
+use near_workspaces::types::NearToken;
+
+// If an external account submits a valid Ethereum transaction signed by the
+// user, then that external account is expected to cover the entire cost of
+// the transaction (including any attached $NEAR).
+#[tokio::test]
+async fn test_insufficient_value() -> anyhow::Result<()> {
+    let TestContext { worker, wallet_contract, wallet_sk, .. } = TestContext::new().await?;
+
+    let external_account = worker.dev_create_account().await?;
+
+    // Create a transaction (NEP-141 transfer) that requires 1 yoctoNear attached.
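+    // (NEP-141's `ft_transfer` requires exactly 1 yoctoNEAR attached as proof
+    // that the caller signed with a full-access key.)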
+ let account_id = "aurora"; + let action = Action::FunctionCall { + receiver_id: account_id.into(), + method_name: "ft_transfer".into(), + args: r#"{"receiver_id": "some.account", "amount": "1"}"#.into(), + gas: 5, + yocto_near: 1, + }; + let signed_transaction = utils::create_signed_transaction( + 0, + &account_id.parse().unwrap(), + Wei::zero(), + action, + &wallet_sk, + ); + + let result = wallet_contract + .external_rlp_execute(&external_account, account_id, &signed_transaction) + .await?; + + assert!(!result.success); + assert_eq!( + result.error, + Some(Error::Caller(CallerError::InsufficientAttachedValue).to_string()) + ); + + // Try again with a transaction that has some attached Wei + let transfer_amount = NearToken::from_near(1).as_yoctonear(); + let action = Action::Transfer { receiver_id: account_id.into(), yocto_near: 0 }; + let signed_transaction = utils::create_signed_transaction( + 1, + &account_id.parse().unwrap(), + Wei::new_u128(transfer_amount / (MAX_YOCTO_NEAR as u128)), + action, + &wallet_sk, + ); + let result = wallet_contract + .external_rlp_execute(&external_account, account_id, &signed_transaction) + .await?; + + assert!(!result.success); + assert_eq!( + result.error, + Some(Error::Caller(CallerError::InsufficientAttachedValue).to_string()) + ); + + // It works if we attach the right amount of Near and does not + // spend any tokens from the Wallet Contract. + let initial_wallet_balance = wallet_contract.inner.as_account().view_account().await?.balance; + let action = Action::Transfer { receiver_id: external_account.id().to_string(), yocto_near: 0 }; + let signed_transaction = utils::create_signed_transaction( + 2, + external_account.id(), + Wei::new_u128(transfer_amount / (MAX_YOCTO_NEAR as u128)), + action, + &wallet_sk, + ); + let result: ExecuteResponse = external_account + .call(wallet_contract.inner.id(), RLP_EXECUTE) + .args_json(serde_json::json!({ + "target": external_account.id(), + "tx_bytes_b64": codec::encode_b64(&codec::rlp_encode(&signed_transaction)) + })) + .max_gas() + .deposit(NearToken::from_yoctonear(transfer_amount)) + .transact() + .await? + .into_result()? + .json()?; + + assert!(result.success); + + let final_wallet_balance = wallet_contract.inner.as_account().view_account().await?.balance; + + assert!(final_wallet_balance >= initial_wallet_balance); + + Ok(()) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/emulation.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/emulation.rs new file mode 100644 index 00000000000..4837564b252 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/emulation.rs @@ -0,0 +1,145 @@ +use crate::{ + internal::{account_id_to_address, CHAIN_ID, MAX_YOCTO_NEAR}, + tests::utils::{crypto, nep141, test_context::TestContext}, +}; +use aurora_engine_types::types::{Address, Wei}; +use near_sdk::json_types::U128; +use near_workspaces::types::NearToken; + +// The Wallet Contract should understand that transactions to other Wallet +// Contract instances are base token transactions. +#[tokio::test] +async fn test_base_token_transfer() -> anyhow::Result<()> { + const TRANSFER_AMOUNT: NearToken = NearToken::from_near(2); + + let TestContext { worker, wallet_contract, wallet_sk, wallet_contract_bytes, .. 
} = + TestContext::new().await?; + + let (other_wallet, other_address) = + TestContext::deploy_wallet(&worker, &wallet_contract_bytes).await?; + + let initial_wallet_balance = wallet_contract.inner.as_account().view_account().await?.balance; + let initial_other_balance = other_wallet.inner.as_account().view_account().await?.balance; + + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::new(other_address)), + value: Wei::new_u128(TRANSFER_AMOUNT.as_yoctonear() / u128::from(MAX_YOCTO_NEAR)), + data: b"A message for the recipient".to_vec(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = + wallet_contract.rlp_execute(other_wallet.inner.id().as_str(), &signed_transaction).await?; + + assert!(result.success); + + let final_wallet_balance = wallet_contract.inner.as_account().view_account().await?.balance; + let final_other_balance = other_wallet.inner.as_account().view_account().await?.balance; + + // Receiver balance increases + assert_eq!( + final_other_balance.as_yoctonear(), + initial_other_balance.as_yoctonear() + TRANSFER_AMOUNT.as_yoctonear() + ); + // Sender balance decreases (by a little more than the + // `TRANSFER_AMOUNT` due to gas spent to execute the transaction) + let diff = NearToken::from_yoctonear( + initial_wallet_balance.as_yoctonear() + - (final_wallet_balance.as_yoctonear() + TRANSFER_AMOUNT.as_yoctonear()), + ); + assert!(diff < NearToken::from_millinear(2)); + + Ok(()) +} + +// The Wallet Contract should understand the ERC-20 standard and map +// it to NEP-141 function calls. +#[tokio::test] +async fn test_erc20_emulation() -> anyhow::Result<()> { + const MINT_AMOUNT: NearToken = NearToken::from_near(100); + const TRANSFER_AMOUNT: NearToken = NearToken::from_near(32); + + let TestContext { + worker, + wallet_contract, + wallet_sk, + wallet_address, + wallet_contract_bytes, + .. 
+ } = TestContext::new().await?; + + let token_contract = nep141::Nep141::deploy(&worker).await?; + token_contract.mint(wallet_contract.inner.id(), MINT_AMOUNT.as_yoctonear()).await?; + + // Check balance + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::new(account_id_to_address( + &token_contract.contract.id().as_str().parse().unwrap(), + ))), + value: Wei::zero(), + data: [ + crate::eth_emulation::ERC20_BALANCE_OF_SELECTOR.to_vec(), + ethabi::encode(&[ethabi::Token::Address(wallet_address)]), + ] + .concat(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract + .rlp_execute(token_contract.contract.id().as_str(), &signed_transaction) + .await?; + + let balance: U128 = serde_json::from_slice(result.success_value.as_ref().unwrap())?; + assert_eq!(balance.0, token_contract.ft_balance_of(wallet_contract.inner.id()).await?); + + // Do a transfer to another account + let (other_wallet, other_address) = + TestContext::deploy_wallet(&worker, &wallet_contract_bytes).await?; + token_contract.mint(other_wallet.inner.id(), MINT_AMOUNT.as_yoctonear()).await?; + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 1.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::new(account_id_to_address( + &token_contract.contract.id().as_str().parse().unwrap(), + ))), + value: Wei::zero(), + data: [ + crate::eth_emulation::ERC20_TRANSFER_SELECTOR.to_vec(), + ethabi::encode(&[ + ethabi::Token::Address(other_address), + ethabi::Token::Uint(TRANSFER_AMOUNT.as_yoctonear().into()), + ]), + ] + .concat(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract + .rlp_execute(token_contract.contract.id().as_str(), &signed_transaction) + .await?; + + assert!(result.success); + assert_eq!( + MINT_AMOUNT.as_yoctonear() - TRANSFER_AMOUNT.as_yoctonear(), + token_contract.ft_balance_of(wallet_contract.inner.id()).await? + ); + assert_eq!( + MINT_AMOUNT.as_yoctonear() + TRANSFER_AMOUNT.as_yoctonear(), + token_contract.ft_balance_of(other_wallet.inner.id()).await? 
+ ); + + Ok(()) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/mod.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/mod.rs new file mode 100644 index 00000000000..e8d1c5cf0be --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/mod.rs @@ -0,0 +1,9 @@ +mod caller_error; +mod emulation; +mod relayer; +mod sanity; +mod user_error; +mod utils; + +pub const RLP_EXECUTE: &str = "rlp_execute"; +pub const GET_NONCE: &str = "get_nonce"; diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/relayer.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/relayer.rs new file mode 100644 index 00000000000..10b17adaa7c --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/relayer.rs @@ -0,0 +1,283 @@ +use crate::{ + internal::{account_id_to_address, CHAIN_ID}, + tests::{ + utils::{ + self, codec, crypto, nep141, + test_context::{TestContext, WalletContract}, + }, + RLP_EXECUTE, + }, + types::{Action, ExecuteResponse}, +}; +use aurora_engine_types::types::{Address, Wei}; +use near_workspaces::{ + network::Sandbox, + types::{AccessKeyPermission, SecretKey}, + Contract, Worker, +}; + +// A relayer can use its own Near account to send a transaction containing data +// signed by the user which adds a FunctionCall access key to the Wallet +// Contract account. This allows the relayer to send transactions on the user's +// behalf while the user covers the gas costs. +#[tokio::test] +async fn test_register_relayer() -> anyhow::Result<()> { + let TestContext { worker, mut wallet_contract, wallet_sk, .. } = TestContext::new().await?; + + let relayer_pk = wallet_contract.register_relayer(&worker).await?; + let key = wallet_contract.inner.as_account().view_access_key(&relayer_pk).await?; + match &key.permission { + AccessKeyPermission::FunctionCall(access) => { + assert_eq!(access.allowance, None); + assert_eq!(access.receiver_id.as_str(), wallet_contract.inner.id().as_str()); + assert_eq!(&access.method_names, &[RLP_EXECUTE]); + } + _ => panic!("Unexpected full access key"), + } + + // Should be able to submit transactions using the new key + utils::deploy_and_call_hello(&worker, &wallet_contract, &wallet_sk, 1).await?; + + // If the relayer is dishonest then its key is revoked. + // In this case the relayer will try to repeat a nonce value. + let result = utils::deploy_and_call_hello(&worker, &wallet_contract, &wallet_sk, 1).await; + let error_message = format!("{:?}", result.unwrap_err()); + assert!(error_message.contains("faulty relayer")); + + assert_revoked_key(&wallet_contract.inner, &relayer_pk).await; + + Ok(()) +} + +// If the relayer sends garbage data to the Wallet Contract then it is banned. +#[tokio::test] +async fn test_relayer_invalid_tx_data() -> anyhow::Result<()> { + let TestContext { worker, mut wallet_contract, .. 
} = TestContext::new().await?; + + async fn new_relayer( + worker: &Worker, + wc: &mut WalletContract, + ) -> anyhow::Result { + wc.register_relayer(worker).await?; + let sk = wc.inner.as_account().secret_key().clone(); + Ok(sk) + } + + async fn rlp_execute( + relayer_key: &SecretKey, + wc: &WalletContract, + tx_bytes: &[u8], + ) -> anyhow::Result<()> { + let relayer_pk = relayer_key.public_key(); + + let result: ExecuteResponse = wc + .inner + .call(RLP_EXECUTE) + .args_json(serde_json::json!({ + "target": "some.account.near", + "tx_bytes_b64": codec::encode_b64(tx_bytes) + })) + .max_gas() + .transact() + .await? + .into_result()? + .json()?; + + assert!(!result.success); + assert_eq!(result.error.as_deref(), Some("Error: faulty relayer")); + + assert_revoked_key(&wc.inner, &relayer_pk).await; + + Ok(()) + } + + let inputs: [&[u8]; 2] = [b"random_garbage_data", &[]]; + let relayer_keys = { + // Need to generate all the relayer keys first because they are + // going to get banned as we run the different inputs in the later loop. + let mut tmp = Vec::new(); + for _ in 0..(inputs.len()) { + tmp.push(new_relayer(&worker, &mut wallet_contract).await?); + } + tmp + }; + + for (input, sk) in inputs.into_iter().zip(relayer_keys) { + wallet_contract.inner.as_account_mut().set_secret_key(sk.clone()); + rlp_execute(&sk, &wallet_contract, input).await?; + } + + Ok(()) +} + +// Tests case where relayer sends a transaction signed by the wrong account. +#[tokio::test] +async fn test_relayer_invalid_sender() -> anyhow::Result<()> { + let TestContext { worker, mut wallet_contract, wallet_contract_bytes, .. } = + TestContext::new().await?; + + let wrong_wallet_sk = TestContext::deploy_wallet(&worker, &wallet_contract_bytes).await?.0.sk; + let relayer_pk = wallet_contract.register_relayer(&worker).await?; + + let target = "aurora"; + let action = Action::Transfer { receiver_id: target.into(), yocto_near: 0 }; + // Transaction signed by wrong secret key + let signed_transaction = utils::create_signed_transaction( + 0, + &target.parse().unwrap(), + Wei::zero(), + action, + &wrong_wallet_sk, + ); + + let result = wallet_contract.rlp_execute(target, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error.as_deref(), Some("Error: faulty relayer")); + + assert_revoked_key(&wallet_contract.inner, &relayer_pk).await; + + Ok(()) +} + +// Tests the case where the relayer sets the `target` to a named account which does not +// hash to the `to` field of the user's signed Ethereum transaction. +#[tokio::test] +async fn test_relayer_invalid_target() -> anyhow::Result<()> { + let TestContext { worker, mut wallet_contract, wallet_sk, .. } = TestContext::new().await?; + + let relayer_pk = wallet_contract.register_relayer(&worker).await?; + + let real_target = "aurora"; + let action = Action::Transfer { receiver_id: real_target.into(), yocto_near: 0 }; + let signed_transaction = utils::create_signed_transaction( + 0, + &real_target.parse().unwrap(), + Wei::zero(), + action, + &wallet_sk, + ); + + let result = + wallet_contract.rlp_execute(&format!("other.{real_target}"), &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error.as_deref(), Some("Error: faulty relayer")); + + assert_revoked_key(&wallet_contract.inner, &relayer_pk).await; + + Ok(()) +} + +// Tests the situation where the relayer sets `target == tx.to` when it should have +// looked up the named account corresponding to `tx.to`. In this case the relayer +// should be banned for being lazy. 
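+// Concretely: the user signs a transaction whose `to` is the keccak-derived
+// address of a named account, and the lazy relayer passes that raw address as
+// the `target` instead of the registered named account it corresponds to.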
+#[tokio::test] +async fn test_relayer_invalid_address_target() -> anyhow::Result<()> { + let TestContext { + worker, + mut wallet_contract, + wallet_sk, + wallet_address, + address_registrar, + .. + } = TestContext::new().await?; + + // Deploy a NEP-141 contract and register its address. + // Registering should prevent a lazy relayer from setting the target incorrectly. + let token_contract = nep141::Nep141::deploy(&worker).await?; + let register_output: Option = address_registrar + .call("register") + .args_json(serde_json::json!({ + "account_id": token_contract.contract.id().as_str() + })) + .max_gas() + .transact() + .await? + .json()?; + let token_address: [u8; 20] = + hex::decode(register_output.as_ref().unwrap().strip_prefix("0x").unwrap())? + .try_into() + .unwrap(); + assert_eq!( + token_address, + account_id_to_address(&token_contract.contract.id().as_str().parse().unwrap(),).0 + ); + + // Set up a relayer with control to send transactions via the Wallet Contract account. + let relayer_pk = wallet_contract.register_relayer(&worker).await?; + + // The user submits a transaction to interact with the NEP-141 contract. + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::from_array(token_address)), + value: Wei::zero(), + data: [ + crate::eth_emulation::ERC20_BALANCE_OF_SELECTOR.to_vec(), + ethabi::encode(&[ethabi::Token::Address(wallet_address)]), + ] + .concat(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + // Relayer fails to set `target` correctly + let result = + wallet_contract.rlp_execute(register_output.unwrap().as_str(), &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error.as_deref(), Some("Error: faulty relayer")); + + assert_revoked_key(&wallet_contract.inner, &relayer_pk).await; + + Ok(()) +} + +// A relayer sending a transaction signed with the wrong chain id is a ban-worthy offense. +#[tokio::test] +async fn test_relayer_wrong_chain_id() -> anyhow::Result<()> { + let TestContext { worker, mut wallet_contract, wallet_sk, wallet_address, .. 
} = + TestContext::new().await?; + + let relayer_pk = wallet_contract.register_relayer(&worker).await?; + + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::new(wallet_address)), + value: Wei::zero(), + data: [ + crate::eth_emulation::ERC20_BALANCE_OF_SELECTOR.to_vec(), + ethabi::encode(&[ethabi::Token::Address(wallet_address)]), + ] + .concat(), + chain_id: CHAIN_ID + 1, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract + .rlp_execute(wallet_contract.inner.id().as_str(), &signed_transaction) + .await?; + + assert!(!result.success); + assert_eq!(result.error.as_deref(), Some("Error: faulty relayer")); + + assert_revoked_key(&wallet_contract.inner, &relayer_pk).await; + + Ok(()) +} + +async fn assert_revoked_key( + wallet_contract: &Contract, + relayer_pk: &near_workspaces::types::PublicKey, +) { + let key_query = wallet_contract.as_account().view_access_key(relayer_pk).await; + + let error_message = format!("{:?}", key_query.unwrap_err()); + assert!(error_message.contains("UnknownAccessKey")); +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/hello.wasm b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/hello.wasm new file mode 100755 index 00000000000..4cba2085bf2 Binary files /dev/null and b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/hello.wasm differ diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/nep141.wasm b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/nep141.wasm new file mode 100755 index 00000000000..b501278c161 Binary files /dev/null and b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/res/nep141.wasm differ diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/sanity.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/sanity.rs new file mode 100644 index 00000000000..37b42ab49f0 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/sanity.rs @@ -0,0 +1,74 @@ +use aurora_engine_types::types::Wei; +use near_sdk::NearToken; + +use crate::{ + internal::MAX_YOCTO_NEAR, + tests::utils::{self, test_context::TestContext}, + types::Action, +}; + +// The initial nonce value for a Wallet Contract should be 0. +#[tokio::test] +async fn test_initial_nonce() -> anyhow::Result<()> { + let TestContext { wallet_contract, .. } = TestContext::new().await?; + + let nonce = wallet_contract.get_nonce().await?; + assert_eq!(nonce, 0); + + Ok(()) +} + +// The Wallet Contract should be able to call other Near smart contracts +#[tokio::test] +async fn test_function_call_action_success() -> anyhow::Result<()> { + let TestContext { worker, wallet_contract, wallet_sk, .. } = TestContext::new().await?; + + utils::deploy_and_call_hello(&worker, &wallet_contract, &wallet_sk, 0).await?; + + // After the transaction the nonce is incremented + let nonce = wallet_contract.get_nonce().await?; + assert_eq!(nonce, 1); + + Ok(()) +} + +// The Wallet Contract should be able to send $NEAR to other Near accounts. +#[tokio::test] +async fn test_base_token_transfer_success() -> anyhow::Result<()> { + let TestContext { worker, wallet_contract, wallet_sk, .. 
} = TestContext::new().await?; + + let transfer_amount = NearToken::from_near(2).as_yoctonear() + 1; + let receiver_account = worker.root_account().unwrap(); + + let initial_wallet_balance = + wallet_contract.inner.as_account().view_account().await.unwrap().balance; + let initial_receiver_balance = receiver_account.view_account().await.unwrap().balance; + + let receiver_id = receiver_account.id().as_str().into(); + let action = Action::Transfer { receiver_id, yocto_near: 1 }; + let value = Wei::new_u128(transfer_amount / (MAX_YOCTO_NEAR as u128)); + let signed_transaction = + utils::create_signed_transaction(0, receiver_account.id(), value, action, &wallet_sk); + + let result = + wallet_contract.rlp_execute(receiver_account.id().as_str(), &signed_transaction).await?; + assert!(result.success); + + let final_wallet_balance = + wallet_contract.inner.as_account().view_account().await.unwrap().balance; + let final_receiver_balance = receiver_account.view_account().await.unwrap().balance; + + // Check token balances + assert_eq!( + final_receiver_balance.as_yoctonear() - initial_receiver_balance.as_yoctonear(), + transfer_amount + ); + // Wallet loses a little more $NEAR than the transfer amount + // due to gas spent on the transaction. + let diff = initial_wallet_balance.as_yoctonear() + - final_wallet_balance.as_yoctonear() + - transfer_amount; + assert!(diff < NearToken::from_millinear(2).as_yoctonear()); + + Ok(()) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/user_error.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/user_error.rs new file mode 100644 index 00000000000..b9a11fd93be --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/user_error.rs @@ -0,0 +1,343 @@ +//! A suite of tests for code paths handling cases where the user signed transaction +//! data that is invalid in some way. This is as opposed to errors which arise +//! from faulty relayers. + +use crate::{ + error::{Error, UnsupportedAction, UserError}, + internal::{account_id_to_address, CHAIN_ID}, + tests::utils::{self, crypto, test_context::TestContext}, + types::{Action, FUNCTION_CALL_SELECTOR}, +}; +use aurora_engine_types::types::{Address, Wei}; +use near_workspaces::types::{KeyType, SecretKey}; + +// Transactions which would deploy an EVM contract are not allowed because +// there is no native EVM bytecode interpreter on Near. +#[tokio::test] +async fn test_evm_deploy() -> anyhow::Result<()> { + let TestContext { wallet_contract, wallet_sk, .. } = TestContext::new().await?; + + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: None, + value: Wei::zero(), + data: Vec::new(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract.rlp_execute("aurora", &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::EvmDeployDisallowed).to_string())); + + Ok(()) +} + +// The Near value of a transaction is equal to `tx.value * 1e6 + action.yocto_near`. +// Near values must be 128-bit numbers. Therefore `tx.value` cannot be larger than +// `u128::MAX // 1e6`. +#[tokio::test] +async fn test_value_too_large() -> anyhow::Result<()> { + let TestContext { wallet_contract, wallet_sk, .. 
} = TestContext::new().await?; + + let account_id = "aurora"; + let action = Action::Transfer { receiver_id: account_id.into(), yocto_near: 0 }; + let signed_transaction = utils::create_signed_transaction( + 0, + &account_id.parse().unwrap(), + Wei::new_u128(u128::MAX), + action, + &wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::ValueTooLarge).to_string())); + + Ok(()) +} + +// Test case where `AddKey`/`DeleteKey` action contains an unknown public key kind +#[tokio::test] +async fn test_unknown_public_key_kind() -> anyhow::Result<()> { + let TestContext { wallet_contract, wallet_sk, .. } = TestContext::new().await?; + + let account_id = "aurora"; + let action = Action::DeleteKey { public_key_kind: 2, public_key: b"a_new_key_type".to_vec() }; + let signed_transaction = utils::create_signed_transaction( + 0, + &account_id.parse().unwrap(), + Wei::zero(), + action, + &wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::UnknownPublicKeyKind).to_string())); + + let action = Action::AddKey { + public_key_kind: 2, + public_key: b"some_key".to_vec(), + nonce: 0, + is_full_access: false, + is_limited_allowance: false, + allowance: 0, + receiver_id: account_id.into(), + method_names: Vec::new(), + }; + let signed_transaction = utils::create_signed_transaction( + 1, + &account_id.parse().unwrap(), + Wei::zero(), + action, + &wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::UnknownPublicKeyKind).to_string())); + + Ok(()) +} + +// Test case where `AddKey`/`DeleteKey` action contains invalid public key bytes +#[tokio::test] +async fn test_invalid_public_key() -> anyhow::Result<()> { + async fn assert_invalid_pk( + ctx: &TestContext, + public_key_kind: u8, + public_key: Vec, + expected_error: UserError, + ) -> anyhow::Result<()> { + let wallet_contract = &ctx.wallet_contract; + let wallet_sk = &ctx.wallet_sk; + + let nonce = wallet_contract.get_nonce().await?; + let account_id = "aurora"; + let action = Action::DeleteKey { public_key_kind, public_key: public_key.clone() }; + let signed_transaction = utils::create_signed_transaction( + nonce, + &account_id.parse().unwrap(), + Wei::zero(), + action, + wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(expected_error.clone()).to_string())); + + let action = Action::AddKey { + public_key_kind, + public_key, + nonce: 0, + is_full_access: false, + is_limited_allowance: false, + allowance: 0, + receiver_id: account_id.into(), + method_names: Vec::new(), + }; + let signed_transaction = utils::create_signed_transaction( + nonce + 1, + &account_id.parse().unwrap(), + Wei::zero(), + action, + wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(expected_error).to_string())); + + Ok(()) + } + + let ctx = TestContext::new().await?; + + assert_invalid_pk(&ctx, 0, Vec::new(), UserError::InvalidEd25519Key).await?; + assert_invalid_pk(&ctx, 0, b"wrong_length".to_vec(), UserError::InvalidEd25519Key).await?; + + 
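+    // Public key kind 1 corresponds to secp256k1 keys (kind 0 above is ed25519).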
+    assert_invalid_pk(&ctx, 1, Vec::new(), UserError::InvalidSecp256k1Key).await?;
+    assert_invalid_pk(&ctx, 1, b"wrong_length".to_vec(), UserError::InvalidSecp256k1Key).await?;
+
+    Ok(())
+}
+
+// Tests the case where we try to add an access key with an invalid `receiver_id`.
+#[tokio::test]
+async fn test_invalid_public_key_account_id() -> anyhow::Result<()> {
+    let TestContext { wallet_contract, wallet_sk, .. } = TestContext::new().await?;
+
+    let key = SecretKey::from_random(KeyType::ED25519);
+    let account_id = "aurora";
+    let non_account_id = "---***---";
+    let action = Action::AddKey {
+        public_key_kind: 0,
+        public_key: key.public_key().key_data().to_vec(),
+        nonce: 0,
+        is_full_access: false,
+        is_limited_allowance: false,
+        allowance: 0,
+        receiver_id: non_account_id.into(),
+        method_names: Vec::new(),
+    };
+    let signed_transaction = utils::create_signed_transaction(
+        0,
+        &account_id.parse().unwrap(),
+        Wei::zero(),
+        action,
+        &wallet_sk,
+    );
+
+    let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?;
+
+    assert!(!result.success);
+    assert_eq!(result.error, Some(Error::User(UserError::InvalidAccessKeyAccountId).to_string()));
+
+    Ok(())
+}
+
+// Users are not allowed to add full access keys to the account.
+// This would be too dangerous, as it could allow for undefined behaviour
+// such as deploying a different contract to an Eth implicit address.
+#[tokio::test]
+async fn test_cannot_add_full_access_key() -> anyhow::Result<()> {
+    let TestContext { wallet_contract, wallet_sk, .. } = TestContext::new().await?;
+
+    let key = SecretKey::from_random(KeyType::ED25519);
+    let action = Action::AddKey {
+        public_key_kind: 0,
+        public_key: key.public_key().key_data().to_vec(),
+        nonce: 0,
+        is_full_access: true,
+        is_limited_allowance: false,
+        allowance: 0,
+        receiver_id: String::new(),
+        method_names: Vec::new(),
+    };
+    let signed_transaction = utils::create_signed_transaction(
+        0,
+        wallet_contract.inner.id(),
+        Wei::zero(),
+        action,
+        &wallet_sk,
+    );
+
+    let result = wallet_contract
+        .rlp_execute(wallet_contract.inner.id().as_str(), &signed_transaction)
+        .await?;
+
+    assert!(!result.success);
+    assert_eq!(
+        result.error,
+        Some(
+            Error::User(UserError::UnsupportedAction(UnsupportedAction::AddFullAccessKey))
+                .to_string()
+        )
+    );
+
+    Ok(())
+}
+
+// Cases where `tx.data` cannot be parsed into a known
+// Action or emulated Ethereum standard.
+#[tokio::test]
+async fn test_bad_data() -> anyhow::Result<()> {
+    let TestContext { wallet_contract, wallet_sk, ..
} = TestContext::new().await?; + + let account_id = "aurora"; + let to = Address::new(account_id_to_address(&account_id.parse().unwrap())); + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 0.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(to), + value: Wei::zero(), + data: hex::decode("deadbeef").unwrap(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::UnknownFunctionSelector).to_string())); + + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 1.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(to), + value: Wei::zero(), + data: [ + FUNCTION_CALL_SELECTOR.to_vec(), + hex::decode("0000000000000000000000000000000000000000000000000000000000000000") + .unwrap(), + ] + .concat(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::InvalidAbiEncodedData).to_string())); + + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: 2.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(to), + value: Wei::zero(), + data: Vec::new(), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + let signed_transaction = crypto::sign_transaction(transaction, &wallet_sk); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::InvalidAbiEncodedData).to_string())); + + Ok(()) +} + +// Test case where the action contains more than 1_000_000 yoctoNear directly. +#[tokio::test] +async fn test_excess_yocto() -> anyhow::Result<()> { + let TestContext { wallet_contract, wallet_sk, .. 
} = TestContext::new().await?; + + let account_id = "aurora"; + let action = Action::Transfer { + receiver_id: account_id.into(), + yocto_near: crate::internal::MAX_YOCTO_NEAR + 1, + }; + let signed_transaction = utils::create_signed_transaction( + 0, + &account_id.parse().unwrap(), + Wei::new_u64(1), + action, + &wallet_sk, + ); + + let result = wallet_contract.rlp_execute(account_id, &signed_transaction).await?; + + assert!(!result.success); + assert_eq!(result.error, Some(Error::User(UserError::ExcessYoctoNear).to_string())); + + Ok(()) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/codec.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/codec.rs new file mode 100644 index 00000000000..fd56b204da0 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/codec.rs @@ -0,0 +1,65 @@ +use crate::types::Action; +use aurora_engine_transactions::EthTransactionKind; + +pub fn abi_encode(action: Action) -> Vec { + let mut buf = Vec::new(); + match action { + Action::FunctionCall { receiver_id, method_name, args, gas, yocto_near } => { + buf.extend_from_slice(crate::types::FUNCTION_CALL_SELECTOR); + let tokens = &[ + ethabi::Token::String(receiver_id), + ethabi::Token::String(method_name), + ethabi::Token::Bytes(args), + ethabi::Token::Uint(gas.into()), + ethabi::Token::Uint(yocto_near.into()), + ]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + Action::Transfer { receiver_id, yocto_near } => { + buf.extend_from_slice(crate::types::TRANSFER_SELECTOR); + let tokens = + &[ethabi::Token::String(receiver_id), ethabi::Token::Uint(yocto_near.into())]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + Action::AddKey { + public_key_kind, + public_key, + nonce, + is_full_access, + is_limited_allowance, + allowance, + receiver_id, + method_names, + } => { + buf.extend_from_slice(crate::types::ADD_KEY_SELECTOR); + let tokens = &[ + ethabi::Token::Uint(public_key_kind.into()), + ethabi::Token::Bytes(public_key), + ethabi::Token::Uint(nonce.into()), + ethabi::Token::Bool(is_full_access), + ethabi::Token::Bool(is_limited_allowance), + ethabi::Token::Uint(allowance.into()), + ethabi::Token::String(receiver_id), + ethabi::Token::Array(method_names.into_iter().map(ethabi::Token::String).collect()), + ]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + Action::DeleteKey { public_key_kind, public_key } => { + buf.extend_from_slice(crate::types::DELETE_KEY_SELECTOR); + let tokens = + &[ethabi::Token::Uint(public_key_kind.into()), ethabi::Token::Bytes(public_key)]; + buf.extend_from_slice(ðabi::encode(tokens)); + } + }; + buf +} + +pub fn rlp_encode(transaction: &EthTransactionKind) -> Vec { + transaction.into() +} + +pub fn encode_b64(input: &[u8]) -> String { + use base64::Engine; + let engine = base64::engine::general_purpose::STANDARD; + engine.encode(input) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/crypto.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/crypto.rs new file mode 100644 index 00000000000..602c5902b29 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/crypto.rs @@ -0,0 +1,25 @@ +use aurora_engine_transactions::{eip_2930::Transaction2930, EthTransactionKind}; +use aurora_engine_types::U256; +use near_crypto::{SecretKey, Signature}; + +pub fn sign_transaction(transaction: Transaction2930, sk: &SecretKey) -> EthTransactionKind { + let mut rlp_stream = 
rlp::RlpStream::new(); + rlp_stream.append(&aurora_engine_transactions::eip_2930::TYPE_BYTE); + transaction.rlp_append_unsigned(&mut rlp_stream); + let message_hash = crate::internal::keccak256(rlp_stream.as_raw()); + let signature = sk.sign(&message_hash); + let bytes: [u8; 65] = match signature { + Signature::SECP256K1(x) => x.into(), + _ => panic!(), + }; + let v = bytes[64]; + let r = U256::from_big_endian(&bytes[0..32]); + let s = U256::from_big_endian(&bytes[32..64]); + let signed_transaction = aurora_engine_transactions::eip_2930::SignedTransaction2930 { + transaction, + parity: v, + r, + s, + }; + EthTransactionKind::Eip2930(signed_transaction) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/mod.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/mod.rs new file mode 100644 index 00000000000..d142a7235bf --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/mod.rs @@ -0,0 +1,63 @@ +use crate::{ + internal::{account_id_to_address, CHAIN_ID}, + tests::utils::test_context::WalletContract, + types::Action, +}; +use aurora_engine_transactions::EthTransactionKind; +use aurora_engine_types::types::{Address, Wei}; +use near_crypto::SecretKey; +use near_workspaces::{network::Sandbox, AccountId, Worker}; + +pub mod codec; +pub mod crypto; +pub mod nep141; +pub mod test_context; + +pub async fn deploy_and_call_hello( + worker: &Worker, + wallet_contract: &WalletContract, + wallet_sk: &SecretKey, + nonce: u64, +) -> anyhow::Result<()> { + let hello_bytes = tokio::fs::read("src/tests/res/hello.wasm").await?; + let hello_contract = worker.dev_deploy(&hello_bytes).await?; + + let action = Action::FunctionCall { + receiver_id: hello_contract.id().to_string(), + method_name: "greet".into(), + args: br#"{"name": "Aurora"}"#.to_vec(), + gas: 5_000_000_000_000, + yocto_near: 0, + }; + let signed_transaction = + create_signed_transaction(nonce, hello_contract.id(), Wei::zero(), action, wallet_sk); + + let result = + wallet_contract.rlp_execute(hello_contract.id().as_str(), &signed_transaction).await?; + + if result.success_value.as_deref() != Some(br#""Hello, Aurora!""#.as_slice()) { + anyhow::bail!("Call to hello contract failed: {:?}", result.error); + } + + Ok(()) +} + +pub fn create_signed_transaction( + nonce: u64, + target: &AccountId, + value: Wei, + action: Action, + wallet_sk: &SecretKey, +) -> EthTransactionKind { + let transaction = aurora_engine_transactions::eip_2930::Transaction2930 { + nonce: nonce.into(), + gas_price: 0.into(), + gas_limit: 0.into(), + to: Some(Address::new(account_id_to_address(&target.as_str().parse().unwrap()))), + value, + data: codec::abi_encode(action), + chain_id: CHAIN_ID, + access_list: Vec::new(), + }; + crypto::sign_transaction(transaction, wallet_sk) +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/nep141.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/nep141.rs new file mode 100644 index 00000000000..1e100afe385 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/nep141.rs @@ -0,0 +1,67 @@ +use near_sdk::json_types::U128; +use near_workspaces::{network::Sandbox, types::NearToken, AccountId, Contract, Worker}; + +const STORAGE_DEPOSIT_AMOUNT: u128 = 1_250_000_000_000_000_000_000; + +pub struct Nep141 { + pub contract: Contract, +} + +impl Nep141 { + pub async fn deploy(worker: &Worker) -> anyhow::Result { + let bytes = 
tokio::fs::read("src/tests/res/nep141.wasm").await?; + let contract = worker.dev_deploy(&bytes).await?; + + contract + .call("new") + .args_json(serde_json::json!({ + "name": "TestToken", + "symbol": "TTT", + "decimals": 24, + })) + .max_gas() + .transact() + .await? + .into_result()?; + + Ok(Self { contract }) + } + + pub async fn mint(&self, account_id: &AccountId, amount: u128) -> anyhow::Result<()> { + self.contract + .call("storage_deposit") + .args_json(serde_json::json!({ + "account_id": account_id.as_str(), + })) + .deposit(NearToken::from_yoctonear(STORAGE_DEPOSIT_AMOUNT)) + .max_gas() + .transact() + .await? + .into_result()?; + + self.contract + .call("mint") + .args_json(serde_json::json!({ + "account_id": account_id.as_str(), + "amount": U128(amount), + })) + .max_gas() + .transact() + .await? + .into_result()?; + + Ok(()) + } + + pub async fn ft_balance_of(&self, account_id: &AccountId) -> anyhow::Result<u128> { + let result: U128 = self + .contract + .view("ft_balance_of") + .args_json(serde_json::json!({ + "account_id": account_id.as_str(), + })) + .await? + .json()?; + Ok(result.0) + } +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/test_context.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/test_context.rs new file mode 100644 index 00000000000..b20ac6eaa7b --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/tests/utils/test_context.rs @@ -0,0 +1,235 @@ +use crate::{ + tests::{ + utils::{self, codec}, + GET_NONCE, RLP_EXECUTE, + }, + types::{Action, ExecuteResponse}, +}; +use aurora_engine_transactions::EthTransactionKind; +use aurora_engine_types::types::Wei; +use ethabi::Address; +use near_sdk::json_types::U64; +use near_workspaces::{ + network::Sandbox, + types::{KeyType, NearToken, PublicKey, SecretKey}, + Account, Contract, Worker, +}; +use std::path::{Path, PathBuf}; +use tokio::{process::Command, sync::Mutex}; + +const BASE_DIR: &str = std::env!("CARGO_MANIFEST_DIR"); +const PACKAGE_NAME: &str = std::env!("CARGO_PKG_NAME"); +const INITIAL_BALANCE: NearToken = NearToken::from_near(20); + +// Prevents multiple tests from trying to compile the contracts at the same time. +static LOCK: Mutex<()> = Mutex::const_new(()); + +pub struct WalletContract { + pub inner: Contract, + pub sk: near_crypto::SecretKey, +} + +impl WalletContract { + pub async fn rlp_execute( + &self, + target: &str, + tx: &EthTransactionKind, + ) -> anyhow::Result<ExecuteResponse> { + let result: ExecuteResponse = self + .inner + .call(RLP_EXECUTE) + .args_json(serde_json::json!({ + "target": target, + "tx_bytes_b64": codec::encode_b64(&codec::rlp_encode(tx)) + })) + .max_gas() + .transact() + .await? + .into_result()? + .json()?; + + Ok(result) + } + + pub async fn external_rlp_execute( + &self, + caller: &Account, + target: &str, + tx: &EthTransactionKind, + ) -> anyhow::Result<ExecuteResponse> { + let result: ExecuteResponse = caller + .call(self.inner.id(), RLP_EXECUTE) + .args_json(serde_json::json!({ + "target": target, + "tx_bytes_b64": codec::encode_b64(&codec::rlp_encode(tx)) + })) + .max_gas() + .transact() + .await? + .into_result()? + .json()?; + + Ok(result) + } + + pub async fn get_nonce(&self) -> anyhow::Result<u64> { + let nonce: U64 = self.inner.view(GET_NONCE).await?.json()?; + Ok(nonce.0) + } + + /// Add a new `FunctionCall` access key to the Wallet Contract account. + /// This allows the relayer to sign and submit transactions from the + /// Wallet Contract account directly.
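+    ///
+    /// A usage sketch (hedged; assumes a mutable `TestContext` named `ctx`
+    /// built from these test utilities):
+    ///
+    /// ```ignore
+    /// let mut ctx = TestContext::new().await?;
+    /// let relayer_pk = ctx.wallet_contract.register_relayer(&ctx.worker).await?;
+    /// // From here on, near-workspaces signs calls from the Wallet Contract
+    /// // account with the relayer's newly added key.
+    /// ```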
+ pub async fn register_relayer( + &mut self, + worker: &Worker<Sandbox>, + ) -> anyhow::Result<PublicKey> { + let relayer_account = worker.dev_create_account().await?; + let relayer_key = SecretKey::from_random(KeyType::ED25519); + let relayer_pk = relayer_key.public_key(); + + let action = Action::AddKey { + public_key_kind: 0, + public_key: relayer_pk.key_data().to_vec(), + nonce: 0, + is_full_access: false, + is_limited_allowance: false, + allowance: 0, + receiver_id: self.inner.id().to_string(), + method_names: vec![RLP_EXECUTE.into()], + }; + let nonce = self.get_nonce().await?; + let signed_transaction = + utils::create_signed_transaction(nonce, self.inner.id(), Wei::zero(), action, &self.sk); + + // Call the Wallet Contract from the relayer account to add the key + let result: ExecuteResponse = relayer_account + .call(self.inner.id(), RLP_EXECUTE) + .args_json(serde_json::json!({ + "target": self.inner.id(), + "tx_bytes_b64": codec::encode_b64(&codec::rlp_encode(&signed_transaction)) + })) + .max_gas() + .transact() + .await? + .into_result()? + .json()?; + + assert!(result.success, "Adding Relayer's key failed: {:?}", result.error); + + // Tell near-workspaces to use this new key instead when + // signing transactions from the Wallet Contract + self.inner.as_account_mut().set_secret_key(relayer_key); + + Ok(relayer_pk) + } +} + +pub struct TestContext { + pub worker: Worker<Sandbox>, + pub wallet_contract: WalletContract, + pub wallet_sk: near_crypto::SecretKey, + pub wallet_address: Address, + pub address_registrar: Contract, + pub wallet_contract_bytes: Vec<u8>, +} + +impl TestContext { + pub async fn new() -> anyhow::Result<Self> { + let _guard = LOCK.lock().await; + let worker = near_workspaces::sandbox().await?; + + let registrar_id_path = address_registrar_account_id_path(BASE_DIR); + let original_registrar_id = tokio::fs::read(&registrar_id_path).await?; + let address_registrar = Self::deploy_address_registrar(&worker).await?; + let wallet_contract_bytes = build_contract(BASE_DIR, PACKAGE_NAME).await?; + // Restore address registrar account id file + tokio::fs::write(registrar_id_path, &original_registrar_id).await?; + + let (wallet_contract, wallet_address) = + Self::deploy_wallet(&worker, &wallet_contract_bytes).await?; + let wallet_sk = wallet_contract.sk.clone(); + + Ok(Self { + worker, + wallet_contract, + wallet_sk, + wallet_address, + address_registrar, + wallet_contract_bytes, + }) + } + + async fn deploy_address_registrar(worker: &Worker<Sandbox>) -> anyhow::Result<Contract> { + let base_dir = Path::new(BASE_DIR).parent().unwrap().join("address-registrar"); + let contract_bytes = build_contract(base_dir, "eth-address-registrar").await?; + let contract = worker.dev_deploy(&contract_bytes).await?; + + // Initialize the contract + contract.call("new").transact().await.unwrap().into_result().unwrap(); + + // Update the file where the Wallet Contract gets the address registrar account id from + tokio::fs::write(address_registrar_account_id_path(BASE_DIR), contract.id().as_bytes()) + .await?; + + Ok(contract) + } + + pub async fn deploy_wallet( + worker: &Worker<Sandbox>, + contract_bytes: &[u8], + ) -> anyhow::Result<(WalletContract, Address)> { + let wallet_sk = near_crypto::SecretKey::from_random(near_crypto::KeyType::SECP256K1); + let wallet_address = { + let wallet_pk = wallet_sk.public_key(); + let hash = crate::internal::keccak256(wallet_pk.key_data()); + Address::from_slice(&hash[12..32]) + }; + let wallet_account = worker + .root_account()?
+ .create_subaccount(&format!("0x{}", hex::encode(wallet_address))) + .keys(SecretKey::from_random(KeyType::ED25519)) + .initial_balance(INITIAL_BALANCE) + .transact() + .await? + .result; + let wallet_contract = WalletContract { + inner: wallet_account.deploy(contract_bytes).await?.result, + sk: wallet_sk, + }; + + Ok((wallet_contract, wallet_address)) + } +} + +async fn build_contract<P: AsRef<Path>>( + base_dir: P, + package_name: &str, ) -> anyhow::Result<Vec<u8>> { + let output = Command::new("cargo") + .env("RUSTFLAGS", "-C link-arg=-s") + .current_dir(base_dir.as_ref()) + .args(["build", "--target", "wasm32-unknown-unknown", "--release"]) + .output() + .await?; + + if !output.status.success() { + anyhow::bail!("Build failed: {}", String::from_utf8_lossy(&output.stderr)); + } + + let artifact_path = base_dir + .as_ref() + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join([package_name.replace('-', "_").as_str(), ".wasm"].concat()); + + let bytes = tokio::fs::read(artifact_path).await?; + Ok(bytes) +} + +fn address_registrar_account_id_path(base_dir: &str) -> PathBuf { + Path::new(base_dir).join("src").join("ADDRESS_REGISTRAR_ACCOUNT_ID") +} diff --git a/runtime/near-wallet-contract/implementation/wallet-contract/src/types.rs b/runtime/near-wallet-contract/implementation/wallet-contract/src/types.rs new file mode 100644 index 00000000000..975e1304a25 --- /dev/null +++ b/runtime/near-wallet-contract/implementation/wallet-contract/src/types.rs @@ -0,0 +1,233 @@ +use crate::{ + error::{Error, UserError}, + near_action::{ + self, AccessKey, AccessKeyPermission, AddKeyAction, DeleteKeyAction, FunctionCallAction, + FunctionCallPermission, TransferAction, + }, +}; +use ethabi::{Address, ParamType}; +use near_sdk::{AccountId, Gas, NearToken, PublicKey}; +use once_cell::sync::Lazy; + +pub const FUNCTION_CALL_SELECTOR: &[u8] = &[0x61, 0x79, 0xb7, 0x07]; +pub const FUNCTION_CALL_SIGNATURE: [ParamType; 5] = [ + ParamType::String, // receiver_id + ParamType::String, // method_name + ParamType::Bytes, // args + ParamType::Uint(64), // gas + ParamType::Uint(32), // yocto_near +]; + +pub const TRANSFER_SELECTOR: &[u8] = &[0x3e, 0xd6, 0x41, 0x24]; +pub const TRANSFER_SIGNATURE: [ParamType; 2] = [ + ParamType::String, // receiver_id + ParamType::Uint(32), // yocto_near +]; + +pub const ADD_KEY_SELECTOR: &[u8] = &[0x75, 0x3c, 0xe5, 0xab]; +// This one needs to be `Lazy` because it requires `Box` (non-const) in the `Array`. +pub static ADD_KEY_SIGNATURE: Lazy<[ParamType; 8]> = Lazy::new(|| { + [ + ParamType::Uint(8), // public_key_kind + ParamType::Bytes, // public_key + ParamType::Uint(64), // nonce + ParamType::Bool, // is_full_access + ParamType::Bool, // is_limited_allowance + ParamType::Uint(128), // allowance + ParamType::String, // receiver_id + ParamType::Array(Box::new(ParamType::String)), // method_names + ] +}); + +pub const DELETE_KEY_SELECTOR: &[u8] = &[0x3f, 0xc6, 0xd4, 0x04]; +pub const DELETE_KEY_SIGNATURE: [ParamType; 2] = [ + ParamType::Uint(8), // public_key_kind + ParamType::Bytes, // public_key +]; + +/// Response given from the `rlp_execute` entry point to the contract. +/// The error information is included because that method is not meant to panic; +/// success or failure must instead be communicated via the return value. +/// The method must never panic so that the contract's state can still be +/// changed even in error cases, for example to ban a dishonest relayer.
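+///
+/// A caller-side sketch (hedged; `wallet`, `target`, and `tx` stand in for a
+/// deployed Wallet Contract handle, a target account id, and a signed
+/// transaction, as in the test utilities):
+///
+/// ```ignore
+/// let resp: ExecuteResponse = wallet.rlp_execute(target, &tx).await?;
+/// if !resp.success {
+///     // The entry point itself returned Ok; the failure is reported here.
+///     eprintln!("execution rejected: {:?}", resp.error);
+/// }
+/// ```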
+#[derive(Debug, PartialEq, Eq, Clone, serde::Serialize, serde::Deserialize)] +pub struct ExecuteResponse { + pub success: bool, + pub success_value: Option<Vec<u8>>, + pub error: Option<String>, +} + +impl From<Error> for ExecuteResponse { + fn from(value: Error) -> Self { + Self { success: false, success_value: None, error: Some(format!("{value}")) } + } +} + +/// Struct holding environment parameters that are needed to validate transactions +/// before executing them. This struct is used in the `internal` module so that it +/// can be unit tested without mocking up the whole Near runtime. In the Wasm contract, +/// the struct is constructed via functions in `near_sdk::env`. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ExecutionContext { + pub current_address: Address, + pub attached_deposit: NearToken, + pub predecessor_account_id: AccountId, + pub current_account_id: AccountId, +} + +impl ExecutionContext { + pub fn new( + current_account_id: AccountId, + predecessor_account_id: AccountId, + attached_deposit: NearToken, + ) -> Result<Self, Error> { + let current_address = crate::internal::extract_address(&current_account_id)?; + Ok(Self { current_address, attached_deposit, predecessor_account_id, current_account_id }) + } +} + +#[must_use] +pub enum TransactionValidationOutcome { + Validated, + AddressCheckRequired(Address), +} + +/// The Near protocol actions represented in a form that is suitable for the +/// Solidity ABI. This allows them to be encoded into the `data` field of an +/// Ethereum transaction in a way that can be parsed by Ethereum tooling. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Action { + FunctionCall { + receiver_id: String, + method_name: String, + args: Vec<u8>, + gas: u64, + yocto_near: u32, + }, + Transfer { + receiver_id: String, + yocto_near: u32, + }, + AddKey { + public_key_kind: u8, + public_key: Vec<u8>, + nonce: u64, + is_full_access: bool, + is_limited_allowance: bool, + allowance: u128, + receiver_id: String, + method_names: Vec<String>, + }, + DeleteKey { + public_key_kind: u8, + public_key: Vec<u8>, + }, +} + +impl Action { + pub fn value(&self) -> NearToken { + match self { + Action::FunctionCall { yocto_near, .. } => { + NearToken::from_yoctonear((*yocto_near).into()) + } + Action::Transfer { yocto_near, .. } => NearToken::from_yoctonear((*yocto_near).into()), + Action::AddKey { .. } => NearToken::from_yoctonear(0), + Action::DeleteKey { ..
} => NearToken::from_yoctonear(0), + } + } + + pub fn try_into_near_action( + self, + additional_value: u128, + ) -> Result<near_action::Action, Error> { + let action = match self { + Action::FunctionCall { receiver_id: _, method_name, args, gas, yocto_near } => { + let action = FunctionCallAction { + method_name, + args, + gas: Gas::from_gas(gas), + deposit: NearToken::from_yoctonear( + additional_value.saturating_add(yocto_near.into()), + ), + }; + near_action::Action::FunctionCall(action) + } + Action::Transfer { receiver_id: _, yocto_near } => { + let action = TransferAction { + deposit: NearToken::from_yoctonear( + additional_value.saturating_add(yocto_near.into()), + ), + }; + near_action::Action::Transfer(action) + } + Action::AddKey { + public_key_kind, + public_key, + nonce, + is_full_access, + is_limited_allowance, + allowance, + receiver_id, + method_names, + } => { + let public_key = construct_public_key(public_key_kind, &public_key)?; + let access_key = if is_full_access { + AccessKey { nonce, permission: AccessKeyPermission::FullAccess } + } else { + let allowance = if is_limited_allowance { Some(allowance) } else { None }; + AccessKey { + nonce, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: allowance.map(NearToken::from_yoctonear), + receiver_id: receiver_id + .parse() + .map_err(|_| Error::User(UserError::InvalidAccessKeyAccountId))?, + method_names, + }), + } + }; + let action = AddKeyAction { public_key, access_key }; + near_action::Action::AddKey(action) + } + Action::DeleteKey { public_key_kind, public_key } => { + let action = DeleteKeyAction { + public_key: construct_public_key(public_key_kind, &public_key)?, + }; + near_action::Action::DeleteKey(action) + } + }; + Ok(action) + } +} + +fn construct_public_key(public_key_kind: u8, public_key: &[u8]) -> Result<PublicKey, Error> { + if public_key_kind > 1 { + return Err(Error::User(UserError::UnknownPublicKeyKind)); + } + let mut bytes = Vec::with_capacity(public_key.len() + 1); + bytes.push(public_key_kind); + bytes.extend_from_slice(public_key); + bytes.try_into().map_err(|_| { + if public_key_kind == 0 { + Error::User(UserError::InvalidEd25519Key) + } else { + Error::User(UserError::InvalidSecp256k1Key) + } + }) +} + +#[test] +fn test_function_selectors() { + let function_call_signature = ethabi::short_signature("functionCall", &FUNCTION_CALL_SIGNATURE); + + let transfer_signature = ethabi::short_signature("transfer", &TRANSFER_SIGNATURE); + + let add_key_signature = ethabi::short_signature("addKey", ADD_KEY_SIGNATURE.as_ref()); + + let delete_key = ethabi::short_signature("deleteKey", &DELETE_KEY_SIGNATURE); + + assert_eq!(function_call_signature, FUNCTION_CALL_SELECTOR); // 0x6179b707 + assert_eq!(transfer_signature, TRANSFER_SELECTOR); // 0x3ed64124 + assert_eq!(add_key_signature, ADD_KEY_SELECTOR); // 0x753ce5ab + assert_eq!(delete_key, DELETE_KEY_SELECTOR); // 0x3fc6d404 +} diff --git a/runtime/near-wallet-contract/res/.gitignore b/runtime/near-wallet-contract/res/.gitignore deleted file mode 100644 index 19e1bced9ad..00000000000 --- a/runtime/near-wallet-contract/res/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.wasm diff --git a/runtime/near-wallet-contract/res/wallet_contract.wasm b/runtime/near-wallet-contract/res/wallet_contract.wasm deleted file mode 100755 index d82afe3cc67..00000000000 Binary files a/runtime/near-wallet-contract/res/wallet_contract.wasm and /dev/null differ diff --git a/runtime/near-wallet-contract/res/wallet_contract_localnet.wasm b/runtime/near-wallet-contract/res/wallet_contract_localnet.wasm
new file mode 100755 index 00000000000..8b6f4f8e84e Binary files /dev/null and b/runtime/near-wallet-contract/res/wallet_contract_localnet.wasm differ diff --git a/runtime/near-wallet-contract/res/wallet_contract_mainnet.wasm b/runtime/near-wallet-contract/res/wallet_contract_mainnet.wasm new file mode 100755 index 00000000000..789b2f5c613 Binary files /dev/null and b/runtime/near-wallet-contract/res/wallet_contract_mainnet.wasm differ diff --git a/runtime/near-wallet-contract/res/wallet_contract_testnet.wasm b/runtime/near-wallet-contract/res/wallet_contract_testnet.wasm new file mode 100755 index 00000000000..3791ec1141c Binary files /dev/null and b/runtime/near-wallet-contract/res/wallet_contract_testnet.wasm differ diff --git a/runtime/near-wallet-contract/src/lib.rs b/runtime/near-wallet-contract/src/lib.rs index fa204b713f6..fd4b36e641d 100644 --- a/runtime/near-wallet-contract/src/lib.rs +++ b/runtime/near-wallet-contract/src/lib.rs @@ -1,68 +1,128 @@ #![doc = include_str!("../README.md")] +use near_primitives_core::chains; use near_vm_runner::ContractCode; use std::sync::{Arc, OnceLock}; -/// Temporary (placeholder) Wallet Contract. -pub fn wallet_contract() -> Arc<ContractCode> { - static CONTRACT: OnceLock<Arc<ContractCode>> = OnceLock::new(); - CONTRACT.get_or_init(|| Arc::new(read_contract())).clone() +static MAINNET: WalletContract = + WalletContract::new(include_bytes!("../res/wallet_contract_mainnet.wasm")); + +static TESTNET: WalletContract = + WalletContract::new(include_bytes!("../res/wallet_contract_testnet.wasm")); + +static LOCALNET: WalletContract = + WalletContract::new(include_bytes!("../res/wallet_contract_localnet.wasm")); + +/// Get wallet contract code for different Near chains. +pub fn wallet_contract(chain_id: &str) -> Arc<ContractCode> { + match chain_id { + chains::MAINNET => MAINNET.read_contract(), + chains::TESTNET => TESTNET.read_contract(), + _ => LOCALNET.read_contract(), + } +} + +/// Get the magic bytes `near[wallet contract hash]` for different Near chains. +pub fn wallet_contract_magic_bytes(chain_id: &str) -> Arc<ContractCode> { + match chain_id { + chains::MAINNET => MAINNET.magic_bytes(), + chains::TESTNET => TESTNET.magic_bytes(), + _ => LOCALNET.magic_bytes(), + } +} + +struct WalletContract { + contract: OnceLock<Arc<ContractCode>>, + magic_bytes: OnceLock<Arc<ContractCode>>, + code: &'static [u8], } -/// Include the WASM file content directly in the binary at compile time.
-fn read_contract() -> ContractCode { +impl WalletContract { #[cfg(feature = "nightly")] - let code = include_bytes!("../res/wallet_contract.wasm"); + const fn new(code: &'static [u8]) -> Self { + Self { contract: OnceLock::new(), magic_bytes: OnceLock::new(), code } + } #[cfg(not(feature = "nightly"))] - let code = &[]; + const fn new(_code: &'static [u8]) -> Self { + Self { contract: OnceLock::new(), magic_bytes: OnceLock::new(), code: &[] } + } - ContractCode::new(code.to_vec(), None) -} + fn read_contract(&self) -> Arc<ContractCode> { + self.contract.get_or_init(|| Arc::new(ContractCode::new(self.code.to_vec(), None))).clone() + } -/// near[wallet contract hash] -pub fn wallet_contract_magic_bytes() -> Arc<ContractCode> { - static CONTRACT: OnceLock<Arc<ContractCode>> = OnceLock::new(); - CONTRACT - .get_or_init(|| { - let wallet_contract_hash = *wallet_contract().hash(); - let magic_bytes = format!("near{}", wallet_contract_hash); - Arc::new(ContractCode::new(magic_bytes.into(), None)) - }) - .clone() + fn magic_bytes(&self) -> Arc<ContractCode> { + self.magic_bytes + .get_or_init(|| { + let wallet_contract = self.read_contract(); + let magic_bytes = format!("near{}", wallet_contract.hash()); + Arc::new(ContractCode::new(magic_bytes.into_bytes(), None)) + }) + .clone() + } } #[cfg(feature = "nightly")] #[cfg(test)] mod tests { use crate::{wallet_contract, wallet_contract_magic_bytes}; - use near_primitives_core::hash::CryptoHash; + use near_primitives_core::{ + chains::{MAINNET, TESTNET}, + hash::CryptoHash, + }; use std::str::FromStr; - const WALLET_CONTRACT_HASH: &'static str = "5wJJ2YaCq75kVSfx8zoZpevg1uLAn4h7nqUd2njKUEXe"; - const MAGIC_BYTES_HASH: &'static str = "31PSU4diHE4cpWju91fb2zTqn5JSDRZ6xNGM2ub8Lgdg"; + #[test] + fn check_mainnet_wallet_contract() { + const WALLET_CONTRACT_HASH: &'static str = "G1tEPtdS2ffvbqx8iMjBS4Tqg5sCZF1X22tvgnMfk91R"; + const MAGIC_BYTES_HASH: &'static str = "AsdzRwGoYZwvNsC5Eb3TJze92rLHefCxd6rmttTyCiNP"; + check_wallet_contract(MAINNET, WALLET_CONTRACT_HASH); + check_wallet_contract_magic_bytes(MAINNET, WALLET_CONTRACT_HASH, MAGIC_BYTES_HASH); + } + + #[test] + fn check_testnet_wallet_contract() { + const WALLET_CONTRACT_HASH: &'static str = "6zgiNrGnV2LfVxc7uDARPXm7qwGmJLBsJRWbYnNfvgF2"; + const MAGIC_BYTES_HASH: &'static str = "CmE5sZBRrJXqhBQEvWiH1ioPWMFE5GeK2Sofr4iedWxC"; + check_wallet_contract(TESTNET, WALLET_CONTRACT_HASH); + check_wallet_contract_magic_bytes(TESTNET, WALLET_CONTRACT_HASH, MAGIC_BYTES_HASH); + } #[test] - #[ignore] - // TODO(eth-implicit) Do not ignore when Wallet Contract build becomes reproducible, - // see https://github.com/near/nearcore/pull/10269#discussion_r1430139987.
- fn check_wallet_contract() { - assert!(!wallet_contract().code().is_empty()); + fn check_localnet_wallet_contract() { + const WALLET_CONTRACT_HASH: &'static str = "AW2bzCGgMAgQeifG52N817KhGRUQVvxKygmbM2HdTxvy"; + const MAGIC_BYTES_HASH: &'static str = "EXJnccvEKmm1Fea7UTEvoUwCCKWgEiJvfjpCZ4L2vmyu"; + const LOCALNET: &str = "localnet"; + check_wallet_contract(LOCALNET, WALLET_CONTRACT_HASH); + check_wallet_contract_magic_bytes(LOCALNET, WALLET_CONTRACT_HASH, MAGIC_BYTES_HASH); + } + + fn check_wallet_contract(chain_id: &str, expected_hash: &str) { + assert!(!wallet_contract(chain_id).code().is_empty()); let expected_hash = - CryptoHash::from_str(WALLET_CONTRACT_HASH).expect("Failed to parse hash from string"); - assert_eq!(*wallet_contract().hash(), expected_hash); + CryptoHash::from_str(expected_hash).expect("Failed to parse hash from string"); + assert_eq!( + *wallet_contract(chain_id).hash(), + expected_hash, + "wallet contract hash mismatch" + ); } - #[test] - #[ignore] - // TODO(eth-implicit) Do not ignore when Wallet Contract build becomes reproducible, - // see https://github.com/near/nearcore/pull/10269#discussion_r1430139987. - fn check_wallet_contract_magic_bytes() { - assert!(!wallet_contract_magic_bytes().code().is_empty()); + fn check_wallet_contract_magic_bytes( + chain_id: &str, + expected_code_hash: &str, + expected_magic_hash: &str, + ) { + assert!(!wallet_contract_magic_bytes(chain_id).code().is_empty()); let expected_hash = - CryptoHash::from_str(MAGIC_BYTES_HASH).expect("Failed to parse hash from string"); - assert_eq!(*wallet_contract_magic_bytes().hash(), expected_hash); + CryptoHash::from_str(expected_magic_hash).expect("Failed to parse hash from string"); + assert_eq!( + *wallet_contract_magic_bytes(chain_id).hash(), + expected_hash, + "magic bytes hash mismatch" + ); - let expected_code = format!("near{}", WALLET_CONTRACT_HASH); - assert_eq!(wallet_contract_magic_bytes().code(), expected_code.as_bytes()); + let expected_code = format!("near{}", expected_code_hash); + assert_eq!(wallet_contract_magic_bytes(chain_id).code(), expected_code.as_bytes()); } } diff --git a/runtime/near-wallet-contract/wallet-contract/Cargo.lock b/runtime/near-wallet-contract/wallet-contract/Cargo.lock deleted file mode 100644 index 7184a20c2ae..00000000000 --- a/runtime/near-wallet-contract/wallet-contract/Cargo.lock +++ /dev/null @@ -1,1529 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" - -[[package]] -name = "ahash" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" -dependencies = [ - "getrandom 0.2.11", - "once_cell", - "version_check", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" -dependencies = [ - "crypto-mac", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "borsh" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" -dependencies = [ - "borsh-derive", - "hashbrown 0.11.2", -] - -[[package]] -name = "borsh-derive" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bumpalo" -version = "3.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytesize" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" - -[[package]] -name = "c2-chacha" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27dae93fe7b1e0424dc57179ac396908c26b035a87234809f5c4dfd1b47dc80" -dependencies = [ - "cipher", - "ppv-lite86", -] - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "serde", - "wasm-bindgen", - "windows-targets", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "cpufeatures" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" -dependencies = [ - "libc", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "curve25519-dalek" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version", - "syn 1.0.109", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", -] - -[[package]] -name = "dyn-clone" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" - -[[package]] -name = "easy-ext" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53aff6fdc1b181225acdcb5b14c47106726fd8e486707315b1b138baed68ee31" - -[[package]] -name = "ed25519" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - 
-[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "iana-time-zone" -version = "0.1.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "indexmap" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" -dependencies = [ - "equivalent", - "hashbrown 0.14.3", -] - -[[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "js-sys" -version = "0.3.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "keccak" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" -dependencies = [ - "cpufeatures", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin", -] - -[[package]] -name = "libc" -version = "0.2.150" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" - -[[package]] -name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "memchr" -version = "2.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - -[[package]] -name = "near-abi" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "885db39b08518fa700b73fa2214e8adbbfba316ba82dd510f50519173eadaf73" -dependencies = [ - "borsh", - "schemars", - "semver", - "serde", -] - -[[package]] -name = "near-account-id" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d258582a1878e6db67400b0504a5099db85718d22c2e07f747fe1706ae7150" -dependencies = [ - "borsh", - "serde", -] - -[[package]] -name = "near-crypto" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e75673d69fd7365508f3d32483669fe45b03bfb34e4d9363e90adae9dfb416c" -dependencies = [ - "arrayref", - "blake2", - "borsh", - "bs58", - "c2-chacha", - "curve25519-dalek", - "derive_more", - "ed25519-dalek", - "near-account-id", - "once_cell", - "parity-secp256k1", - "primitive-types", - "rand 0.7.3", - "rand_core 0.5.1", - "serde", - "serde_json", - "subtle", - "thiserror", -] - -[[package]] -name = "near-primitives" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad1a9a1640539c81f065425c31bffcfbf6b31ef1aeaade59ce905f5df6ac860" -dependencies = [ - "borsh", - "byteorder", - "bytesize", - "chrono", - "derive_more", - "easy-ext", - "hex", - "near-crypto", - "near-primitives-core", - "near-rpc-error-macro", - "near-vm-errors", - "num-rational", - "once_cell", - "primitive-types", - "rand 0.7.3", - "reed-solomon-erasure", - "serde", - "serde_json", - "smart-default", - "strum", - "thiserror", -] - -[[package]] -name = "near-primitives-core" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d508f0fc340f6461e4e256417685720d3c4c00bb5a939b105160e49137caba" -dependencies = [ - "base64 0.11.0", - "borsh", - "bs58", - "derive_more", - "near-account-id", - "num-rational", - "serde", - "sha2 0.10.8", - "strum", -] - -[[package]] -name = "near-rpc-error-core" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ee0b41c75ef859c193a8ff1dadfa0c8207bc0ac447cc22259721ad769a1408" 
-dependencies = [ - "quote", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "near-rpc-error-macro" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e837bd4bacd807073ec5ceb85708da7f721b46a4c2a978de86027fb0034ce31" -dependencies = [ - "near-rpc-error-core", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "near-sdk" -version = "4.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15eb3de2defe3626260cc209a6cdb985c6b27b0bd4619fad97dcfae002c3c5bd" -dependencies = [ - "base64 0.13.1", - "borsh", - "bs58", - "near-abi", - "near-crypto", - "near-primitives", - "near-primitives-core", - "near-sdk-macros", - "near-sys", - "near-vm-logic", - "once_cell", - "schemars", - "serde", - "serde_json", - "wee_alloc", -] - -[[package]] -name = "near-sdk-macros" -version = "4.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4907affc9f5ed559456509188ff0024f1f2099c0830e6bdb66eb61d5b75912c0" -dependencies = [ - "Inflector", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "near-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397688591acf8d3ebf2c2485ba32d4b24fc10aad5334e3ad8ec0b7179bfdf06b" - -[[package]] -name = "near-vm-errors" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0da466a30f0446639cbd788c30865086fac3e8dcb07a79e51d2b0775ed4261e" -dependencies = [ - "borsh", - "near-account-id", - "near-rpc-error-macro", - "serde", -] - -[[package]] -name = "near-vm-logic" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b534828419bacbf1f7b11ef7b00420f248c548c485d3f0cfda8bb6931152f2" -dependencies = [ - "base64 0.13.1", - "borsh", - "bs58", - "byteorder", - "near-account-id", - "near-crypto", - "near-primitives", - "near-primitives-core", - "near-vm-errors", - "ripemd", - "serde", - "sha2 0.10.8", - "sha3", - "zeropool-bn", -] - -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-traits" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.4", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "parity-secp256k1" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fca4f82fccae37e8bbdaeb949a4a218a1bbc485d11598f193d2a908042e5fc1" -dependencies = [ - "arrayvec 0.5.2", - "cc", - "cfg-if 0.1.10", - "rand 0.7.3", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit", -] - -[[package]] -name = "proc-macro2" -version = "1.0.70" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.11", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "reed-solomon-erasure" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" -dependencies = [ - "smallvec", -] - -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "rlp" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" -dependencies = [ - "rustc-hex", -] - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustversion" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "schemars" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" -dependencies = [ - "dyn-clone", - "schemars_derive", - "serde", - "serde_json", -] - -[[package]] -name = "schemars_derive" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" -dependencies = [ - "proc-macro2", - "quote", - "serde_derive_internals", - "syn 1.0.109", -] - -[[package]] -name = "semver" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" - -[[package]] -name = "serde" -version = "1.0.193" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.193" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - -[[package]] -name = "serde_derive_internals" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "serde_json" -version = "1.0.108" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" - -[[package]] -name = "smallvec" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" - -[[package]] -name = "smart-default" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "133659a15339456eeeb07572eb02a91c91e9815e9cbc89566944d2c8d3efdbf6" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "rustversion", - "syn 1.0.109", -] - -[[package]] -name = "subtle" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - 
-[[package]] -name = "syn" -version = "2.0.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "thiserror" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_datetime" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" - -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow", -] - -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wallet-contract" -version = "0.1.0" -dependencies = [ - "hex", - "near-sdk", - "rlp", - "serde_json", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.39", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" - -[[package]] -name = "wee_alloc" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "memory_units", - "winapi", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-core" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = 
"0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "winnow" -version = "0.5.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67b5f0a4e7a27a64c651977932b9dc5667ca7fc31ac44b03ed37a0cf42fdfff" -dependencies = [ - "memchr", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - -[[package]] -name = "zeropool-bn" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e61de68ede9ffdd69c01664f65a178c5188b73f78faa21f0936016a888ff7c" -dependencies = [ - "borsh", - "byteorder", - "crunchy", - "lazy_static", - "rand 0.8.5", - "rustc-hex", -] diff --git a/runtime/near-wallet-contract/wallet-contract/Cargo.toml b/runtime/near-wallet-contract/wallet-contract/Cargo.toml deleted file mode 100644 index a7c0259d244..00000000000 --- a/runtime/near-wallet-contract/wallet-contract/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "wallet-contract" -version = "0.1.0" -publish = false -edition = "2021" - -[lib] -crate-type = ["cdylib"] - -[dependencies] -hex = "0.4.2" -serde_json = "1.0.68" -near-sdk = "4.1.1" -rlp = "0.4.6" - -[profile.release] -codegen-units = 1 -# Tell `rustc` to optimize for small code size. -opt-level = "z" -strip = true -lto = true -debug = false -panic = "abort" -rpath = false -debug-assertions = false -incremental = false -overflow-checks = true - -[workspace] -members = [] diff --git a/runtime/near-wallet-contract/wallet-contract/src/lib.rs b/runtime/near-wallet-contract/wallet-contract/src/lib.rs deleted file mode 100644 index 193e0942cc9..00000000000 --- a/runtime/near-wallet-contract/wallet-contract/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Temporary implementation of the Wallet Contract. -//! See https://github.com/near/NEPs/issues/518. -//! Must not use in production! -// TODO(eth-implicit) Change to a real Wallet Contract implementation. - -use hex; -use near_sdk::borsh::{self, BorshDeserialize, BorshSerialize}; -use near_sdk::{env, near_bindgen, AccountId, Promise}; -use rlp::Rlp; - -#[near_bindgen] -#[derive(Default, BorshDeserialize, BorshSerialize)] -pub struct WalletContract {} - -#[near_bindgen] -impl WalletContract { - /// For the sake of this placeholder implementation, we assume simplified version of the `rlp_transaction` - /// that only has 3 values: `To`, `Value`, and `PublicKey`. 
We assume this is a transfer transaction. - /// The real implementation would obtain the public key from `Signature`. - pub fn execute_rlp(&self, target: AccountId, rlp_transaction: Vec<u8>) { - let rlp = Rlp::new(&rlp_transaction); - - let to: String = match rlp.val_at(0) { - Ok(to) => to, - _ => env::panic_str("Missing `to` field in RLP-encoded transaction."), - }; - if target.to_string() != to { - env::panic_str("`target` not equal to transaction's `To` address."); - } - - let value_bytes: Vec<u8> = match rlp.val_at(1) { - Ok(value_bytes) => value_bytes, - _ => env::panic_str("Missing `value` field in RLP-encoded transaction."), - }; - let value = u128::from_be_bytes( - value_bytes.try_into().expect("Incorrect `value` field in RLP-encoded transaction."), - ); - - let signer_public_key_bytes: Vec<u8> = match rlp.val_at(2) { - Ok(signer_public_key_bytes) => signer_public_key_bytes, - _ => env::panic_str("Signature extraction failed for RLP-encoded transaction."), - }; - - let hash = env::keccak256(&signer_public_key_bytes); - let signer_address = format!("0x{}", hex::encode(&hash[12..32])); - - if signer_address != env::current_account_id().to_string() { - env::panic_str("Public key does not match the Wallet Contract address."); - } - - Promise::new(target).transfer(value); - } -} diff --git a/runtime/runtime-params-estimator/src/cost.rs b/runtime/runtime-params-estimator/src/cost.rs index f1f73f71414..a27b5efdce1 100644 --- a/runtime/runtime-params-estimator/src/cost.rs +++ b/runtime/runtime-params-estimator/src/cost.rs @@ -5,6 +5,7 @@ use std::str::FromStr; /// /// TODO: Deduplicate this enum with `ExtCosts` and `ActionCosts`. #[derive(Copy, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, clap::ValueEnum)] +#[clap(rename_all = "PascalCase")] #[repr(u8)] pub enum Cost { // Every set of actions in a transaction needs to be transformed into a diff --git a/runtime/runtime/src/actions.rs b/runtime/runtime/src/actions.rs index 8e730c1db1e..5f25f6af87c 100644 --- a/runtime/runtime/src/actions.rs +++ b/runtime/runtime/src/actions.rs @@ -57,8 +57,9 @@ fn get_contract_code( if checked_feature!("stable", EthImplicitAccounts, protocol_version) && account_id.get_account_type() == AccountType::EthImplicitAccount { - assert!(code_hash == *wallet_contract_magic_bytes().hash()); - return Ok(Some(wallet_contract())); + let chain_id = runtime_ext.chain_id(); + assert!(&code_hash == wallet_contract_magic_bytes(&chain_id).hash()); + return Ok(Some(wallet_contract(&chain_id))); } runtime_ext.get_code(code_hash).map(|option| option.map(Arc::new)) } @@ -561,6 +562,7 @@ pub(crate) fn action_implicit_account_creation_transfer( block_height: BlockHeight, current_protocol_version: ProtocolVersion, nonrefundable_storage_transfer: bool, + epoch_info_provider: &dyn EpochInfoProvider, ) { *actor_id = account_id.clone(); @@ -607,10 +609,12 @@ pub(crate) fn action_implicit_account_creation_transfer( // It holds because in the only calling site, we've checked the permissions before. AccountType::EthImplicitAccount => { if checked_feature!("stable", EthImplicitAccounts, current_protocol_version) { + let chain_id = epoch_info_provider.chain_id(); + + // We deploy "near[wallet contract hash]" magic bytes as the contract code, // to mark that this is a neard-defined contract. It will not be used on a function call. // Instead, neard-defined Wallet Contract implementation will be used.
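The actions.rs hunks above replace the global wallet-contract lookup with one keyed by chain id. A minimal sketch of that shape, using illustrative stand-in names rather than the actual nearcore API:

```rust
// Hedged sketch: wallet-contract code and magic bytes resolved per
// chain id, so e.g. mainnet and testnet can ship different builds.
use std::collections::HashMap;
use std::sync::Arc;

struct ContractCode {
    code: Vec<u8>,
    hash: [u8; 32], // stand-in for nearcore's CryptoHash
}

struct WalletContracts {
    per_chain: HashMap<String, Arc<ContractCode>>,
}

impl WalletContracts {
    // Analogous to the `wallet_contract(&chain_id)` call sites above.
    fn wallet_contract(&self, chain_id: &str) -> Option<Arc<ContractCode>> {
        self.per_chain.get(chain_id).cloned()
    }

    // Analogous to `wallet_contract_magic_bytes(&chain_id).hash()`.
    fn magic_bytes_hash(&self, chain_id: &str) -> Option<[u8; 32]> {
        self.per_chain.get(chain_id).map(|c| c.hash)
    }
}
```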
- let magic_bytes = wallet_contract_magic_bytes(); + let magic_bytes = wallet_contract_magic_bytes(&chain_id); let storage_usage = fee_config.storage_usage_config.num_bytes_account + magic_bytes.code().len() as u64 @@ -630,7 +634,7 @@ pub(crate) fn action_implicit_account_creation_transfer( // Note this contract is shared among ETH-implicit accounts and `precompile_contract` // is a no-op if the contract was already compiled. precompile_contract( - &wallet_contract(), + &wallet_contract(&chain_id), &apply_state.config.wasm_config, apply_state.cache.as_deref(), ) diff --git a/runtime/runtime/src/config.rs b/runtime/runtime/src/config.rs index aa4e976e1bd..c5681c8481f 100644 --- a/runtime/runtime/src/config.rs +++ b/runtime/runtime/src/config.rs @@ -2,6 +2,8 @@ use near_primitives::account::AccessKeyPermission; use near_primitives::errors::IntegerOverflowError; +use near_primitives::version::FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION; +use near_primitives_core::types::ProtocolVersion; use num_bigint::BigUint; use num_traits::cast::ToPrimitive; use num_traits::pow::Pow; @@ -248,6 +250,7 @@ pub fn tx_cost( transaction: &Transaction, gas_price: Balance, sender_is_receiver: bool, + protocol_version: ProtocolVersion, ) -> Result<TransactionCost, IntegerOverflowError> { let fees = &config.fees; let mut gas_burnt: Gas = fees.fee(ActionCosts::new_action_receipt).send_fee(sender_is_receiver); @@ -267,7 +270,15 @@ pub fn tx_cost( // If signer is equals to receiver the receipt will be processed at the same block as this // transaction. Otherwise it will processed in the next block and the gas might be inflated. let initial_receipt_hop = if transaction.signer_id == transaction.receiver_id { 0 } else { 1 }; - let minimum_new_receipt_gas = fees.min_receipt_with_function_call_gas(); + let minimum_new_receipt_gas = if protocol_version < FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION { + fees.min_receipt_with_function_call_gas() + } else { + // The pessimistic gas pricing is a best-effort limit which can be breached in case of + // congestion when receipts are delayed before they execute. Hence there is not much + // value in tying this limit to the function call base cost. Making it constant limits + // overcharging to 6x, which was the value before the cost increase. + 4_855_842_000_000 // 4.855 TGas. + }; // In case the config is free, we don't care about the maximum depth.
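The config.rs hunk above gates the minimum new-receipt gas on the protocol version. A compact sketch of that gating, where the version constant's value is an assumption and `dynamic_minimum` stands in for `fees.min_receipt_with_function_call_gas()`:

```rust
// Hedged sketch, not the nearcore implementation: version-gated choice
// of the minimum gas attributed to a new function-call receipt.
type Gas = u64;
type ProtocolVersion = u32;

const FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION: ProtocolVersion = 66; // assumed value
const FIXED_MINIMUM_NEW_RECEIPT_GAS: Gas = 4_855_842_000_000; // ~4.86 TGas

fn minimum_new_receipt_gas(dynamic_minimum: Gas, protocol_version: ProtocolVersion) -> Gas {
    if protocol_version < FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION {
        // Old protocol: minimum tied to the function call base cost.
        dynamic_minimum
    } else {
        // New protocol: a constant, capping pessimistic overcharging at ~6x.
        FIXED_MINIMUM_NEW_RECEIPT_GAS
    }
}
```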
let receipt_gas_price = if gas_price == 0 { 0 diff --git a/runtime/runtime/src/ext.rs b/runtime/runtime/src/ext.rs index 94efdabb4af..a8b30c70824 100644 --- a/runtime/runtime/src/ext.rs +++ b/runtime/runtime/src/ext.rs @@ -102,6 +102,10 @@ impl<'a> RuntimeExt<'a> { pub fn protocol_version(&self) -> ProtocolVersion { self.current_protocol_version } + + pub fn chain_id(&self) -> String { + self.epoch_info_provider.chain_id() + } } fn wrap_storage_error(error: StorageError) -> VMLogicError { diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs index 5801211b84c..13c4cbbeb03 100644 --- a/runtime/runtime/src/lib.rs +++ b/runtime/runtime/src/lib.rs @@ -157,6 +157,7 @@ pub struct ApplyResult { pub state_changes: Vec<RawStateChangesWithTrieKey>, pub stats: ApplyStats, pub processed_delayed_receipts: Vec<Receipt>, + pub processed_yield_timeouts: Vec<PromiseYieldTimeout>, pub proof: Option<PartialStorage>, pub delayed_receipts_count: u64, pub metrics: Option<ApplyMetrics>, @@ -432,6 +433,7 @@ impl Runtime { state_update, apply_state, actor_id, + epoch_info_provider, )?; } #[cfg(feature = "protocol_feature_nonrefundable_transfer_nep491")] @@ -448,6 +450,7 @@ impl Runtime { state_update, apply_state, actor_id, + epoch_info_provider, )?; } Action::Stake(stake) => { @@ -1364,6 +1367,7 @@ impl Runtime { state_changes, stats, processed_delayed_receipts: vec![], + processed_yield_timeouts: vec![], proof, delayed_receipts_count: delayed_receipts_indices.len(), metrics: None, @@ -1422,6 +1426,9 @@ ) .entered(); let node_counter_before = state_update.trie().get_trie_nodes_count(); + let recorded_storage_size_before = state_update.trie().recorded_storage_size(); + let storage_proof_size_upper_bound_before = + state_update.trie().recorded_storage_size_upper_bound(); let result = self.process_receipt( state_update, apply_state, @@ -1434,6 +1441,26 @@ let node_counter_after = state_update.trie().get_trie_nodes_count(); tracing::trace!(target: "runtime", ?node_counter_before, ?node_counter_after); + let recorded_storage_diff = state_update + .trie() + .recorded_storage_size() + .saturating_sub(recorded_storage_size_before) + as f64; + let recorded_storage_upper_bound_diff = state_update + .trie() + .recorded_storage_size_upper_bound() + .saturating_sub(storage_proof_size_upper_bound_before) + as f64; + metrics::RECEIPT_RECORDED_SIZE.observe(recorded_storage_diff); + metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND.observe(recorded_storage_upper_bound_diff); + let recorded_storage_proof_ratio = + recorded_storage_upper_bound_diff / f64::max(1.0, recorded_storage_diff); + // Record the ratio only for large receipts; small receipts can have a very high ratio, + // but the ratio is not that important for them. + if recorded_storage_upper_bound_diff > 100_000. { + metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO + .observe(recorded_storage_proof_ratio); + } if let Some(outcome_with_id) = result? { let gas_burnt = outcome_with_id.outcome.gas_burnt; let compute_usage = outcome_with_id @@ -1463,8 +1490,8 @@ }; // We first process local receipts. They contain staking, local contract calls, etc.
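The receipt-level instrumentation above observes the upper bound, the true recorded size, and their ratio, skipping small receipts whose ratio is noisy. A self-contained sketch of that ratio logic:

```rust
// Hedged sketch of the ratio computation above: guard against division
// by ~zero and only report receipts with a sizable storage proof.
fn storage_proof_ratio(upper_bound_diff: f64, recorded_diff: f64) -> Option<f64> {
    let ratio = upper_bound_diff / f64::max(1.0, recorded_diff);
    // Mirrors the 100 KB threshold used for the histogram above.
    (upper_bound_diff > 100_000.0).then_some(ratio)
}

fn main() {
    assert_eq!(storage_proof_ratio(300_000.0, 150_000.0), Some(2.0));
    assert_eq!(storage_proof_ratio(50_000.0, 25_000.0), None); // too small to record
}
```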
+ let local_processing_start = std::time::Instant::now(); if let Some(prefetcher) = &mut prefetcher { - prefetcher.clear(); // Prefetcher is allowed to fail _ = prefetcher.prefetch_receipts_data(&local_receipts); } @@ -1480,9 +1507,16 @@ impl Runtime { process_receipt(receipt, &mut state_update, &mut total)?; } } - metrics.local_receipts_done(total.gas, total.compute); + metrics.local_receipts_done( + local_receipts.len() as u64, + local_processing_start.elapsed(), + total.gas, + total.compute, + ); // Then we process the delayed receipts. It's a backlog of receipts from the past blocks. + let delayed_processing_start = std::time::Instant::now(); + let mut delayed_receipt_count = 0; while delayed_receipts_indices.first_index < delayed_receipts_indices.next_available_index { if total.compute >= compute_limit || proof_size_limit @@ -1490,6 +1524,7 @@ impl Runtime { { break; } + delayed_receipt_count += 1; let key = TrieKey::DelayedReceipt { index: delayed_receipts_indices.first_index }; let receipt: Receipt = get(&state_update, &key)?.ok_or_else(|| { StorageError::StorageInconsistentState(format!( @@ -1499,7 +1534,6 @@ impl Runtime { })?; if let Some(prefetcher) = &mut prefetcher { - prefetcher.clear(); // Prefetcher is allowed to fail _ = prefetcher.prefetch_receipts_data(std::slice::from_ref(&receipt)); } @@ -1523,11 +1557,16 @@ impl Runtime { process_receipt(&receipt, &mut state_update, &mut total)?; processed_delayed_receipts.push(receipt); } - metrics.delayed_receipts_done(total.gas, total.compute); + metrics.delayed_receipts_done( + delayed_receipt_count, + delayed_processing_start.elapsed(), + total.gas, + total.compute, + ); // And then we process the new incoming receipts. These are receipts from other shards. + let incoming_processing_start = std::time::Instant::now(); if let Some(prefetcher) = &mut prefetcher { - prefetcher.clear(); // Prefetcher is allowed to fail _ = prefetcher.prefetch_receipts_data(&incoming_receipts); } @@ -1549,12 +1588,12 @@ impl Runtime { process_receipt(receipt, &mut state_update, &mut total)?; } } - metrics.incoming_receipts_done(total.gas, total.compute); - - // No more receipts are executed on this trie, stop any pending prefetches on it. - if let Some(prefetcher) = &prefetcher { - prefetcher.clear(); - } + metrics.incoming_receipts_done( + incoming_receipts.len() as u64, + incoming_processing_start.elapsed(), + total.gas, + total.compute, + ); // Resolve timed-out PromiseYield receipts let mut promise_yield_indices: PromiseYieldIndices = @@ -1562,7 +1601,9 @@ impl Runtime { let initial_promise_yield_indices = promise_yield_indices.clone(); let mut new_receipt_index: usize = 0; + let mut processed_yield_timeouts = vec![]; let mut timeout_receipts = vec![]; + let yield_processing_start = std::time::Instant::now(); while promise_yield_indices.first_index < promise_yield_indices.next_available_index { if total.compute >= compute_limit || proof_size_limit @@ -1588,11 +1629,11 @@ impl Runtime { } // Check if the yielded promise still needs to be resolved - let yielded_promise_key = TrieKey::PromiseYieldReceipt { + let promise_yield_key = TrieKey::PromiseYieldReceipt { receiver_id: queue_entry.account_id.clone(), data_id: queue_entry.data_id, }; - if state_update.contains_key(&yielded_promise_key)? { + if state_update.contains_key(&promise_yield_key)? 
{ let new_receipt_id = create_receipt_id_from_receipt_id( protocol_version, &queue_entry.data_id, @@ -1622,11 +1663,17 @@ timeout_receipts.push(resume_receipt); } + processed_yield_timeouts.push(queue_entry); state_update.remove(queue_entry_key); // Math checked above: first_index is less than next_available_index promise_yield_indices.first_index += 1; } - metrics.yield_timeouts_done(total.gas, total.compute); + metrics.yield_timeouts_done( + processed_yield_timeouts.len() as u64, + yield_processing_start.elapsed(), + total.gas, + total.compute, + ); let _span = tracing::debug_span!(target: "runtime", "apply_commit").entered(); if delayed_receipts_indices != initial_delayed_receipt_indices { @@ -1650,7 +1697,24 @@ impl Runtime { state_update.commit(StateChangeCause::UpdatedDelayedReceipts); self.apply_state_patch(&mut state_update, state_patch); + let chunk_recorded_size_upper_bound = + state_update.trie.recorded_storage_size_upper_bound() as f64; + metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND.observe(chunk_recorded_size_upper_bound); let (trie, trie_changes, state_changes) = state_update.finalize()?; + if let Some(prefetcher) = &prefetcher { + // Only clear the prefetcher queue after finalize is done because as part of receipt + // processing we also prefetch account data and access keys that are accessed in + // finalize. Loading this data can otherwise take a very long time if it is not prefetched. + // + // (This probably results in more data being accessed than strictly necessary, and the + // prefetcher may touch data that is no longer relevant as a result, but that is an + // acceptable trade-off.) + // + // In the future it may make sense for the prefetcher to maintain two queues: one for + // data that is going to be required soon, and another that it only works through when + // otherwise idle. + let discarded_prefetch_requests = prefetcher.clear(); + tracing::debug!(target: "runtime", discarded_prefetch_requests); + } // Dedup proposals from the same account. // The order is deterministically changed.
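Together with the counts and durations added earlier in this file, each receipt-processing phase now feeds an accumulate-then-flush metrics struct. A reduced sketch of that pattern, with abbreviated field names:

```rust
use std::time::{Duration, Instant};

// Hedged sketch: per-phase counters accumulated during apply() and
// flushed to Prometheus-style counters later in report().
#[derive(Default)]
struct PhaseMetrics {
    processed_total: u64,
    processing_seconds_total: f64,
}

impl PhaseMetrics {
    // Mirrors e.g. `local_receipts_done(count, time, ...)` above.
    fn done(&mut self, count: u64, elapsed: Duration) {
        self.processed_total += count;
        self.processing_seconds_total += elapsed.as_secs_f64();
    }
}

fn main() {
    let mut local = PhaseMetrics::default();
    let start = Instant::now();
    // ... process local receipts here ...
    local.done(3, start.elapsed());
    assert_eq!(local.processed_total, 3);
}
```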
@@ -1665,6 +1729,10 @@ impl Runtime { } let state_root = trie_changes.new_root; + let chunk_recorded_size = trie.recorded_storage_size() as f64; + metrics::CHUNK_RECORDED_SIZE.observe(chunk_recorded_size); + metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO + .observe(chunk_recorded_size_upper_bound / f64::max(1.0, chunk_recorded_size)); let proof = trie.recorded_storage(); Ok(ApplyResult { state_root, @@ -1675,6 +1743,7 @@ impl Runtime { state_changes, stats, processed_delayed_receipts, + processed_yield_timeouts, proof, delayed_receipts_count: delayed_receipts_indices.len(), metrics: Some(metrics), @@ -1720,6 +1789,7 @@ fn action_transfer_or_implicit_account_creation( state_update: &mut TrieUpdate, apply_state: &ApplyState, actor_id: &mut AccountId, + epoch_info_provider: &dyn EpochInfoProvider, ) -> Result<(), RuntimeError> { Ok(if let Some(account) = account.as_mut() { if nonrefundable { @@ -1757,6 +1827,7 @@ fn action_transfer_or_implicit_account_creation( apply_state.block_height, apply_state.current_protocol_version, nonrefundable, + epoch_info_provider, ); }) } diff --git a/runtime/runtime/src/metrics.rs b/runtime/runtime/src/metrics.rs index cbfffaad2e9..0d9edee67af 100644 --- a/runtime/runtime/src/metrics.rs +++ b/runtime/runtime/src/metrics.rs @@ -1,8 +1,10 @@ use near_o11y::metrics::{ - try_create_histogram_vec, try_create_int_counter, try_create_int_counter_vec, HistogramVec, - IntCounter, IntCounterVec, + exponential_buckets, linear_buckets, try_create_counter_vec, try_create_histogram_vec, + try_create_histogram_with_buckets, try_create_int_counter, try_create_int_counter_vec, + CounterVec, Histogram, HistogramVec, IntCounter, IntCounterVec, }; use once_cell::sync::Lazy; +use std::time::Duration; pub static ACTION_CALLED_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { try_create_int_counter_vec( @@ -20,6 +22,79 @@ pub static TRANSACTION_PROCESSED_TOTAL: Lazy<IntCounter> = Lazy::new(|| { ) .unwrap() }); + +pub static INCOMING_RECEIPT_PROCESSED_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { + try_create_int_counter_vec( + "near_incoming_receipt_processed_total", + "The number of incoming receipts processed since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static INCOMING_RECEIPT_PROCESSING_SECONDS_TOTAL: Lazy<CounterVec> = Lazy::new(|| { + try_create_counter_vec( + "near_incoming_receipt_processing_seconds_total", + "The time spent on processing incoming receipts since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static DELAYED_RECEIPT_PROCESSED_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { + try_create_int_counter_vec( + "near_delayed_receipt_processed_total", + "The number of delayed receipts processed since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static DELAYED_RECEIPT_PROCESSING_SECONDS_TOTAL: Lazy<CounterVec> = Lazy::new(|| { + try_create_counter_vec( + "near_delayed_receipt_processing_seconds_total", + "The time spent on processing delayed receipts since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static LOCAL_RECEIPT_PROCESSED_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { + try_create_int_counter_vec( + "near_local_receipt_processed_total", + "The number of local receipts processed since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static LOCAL_RECEIPT_PROCESSING_SECONDS_TOTAL: Lazy<CounterVec> = Lazy::new(|| { + try_create_counter_vec( + "near_local_receipt_processing_seconds_total", + "The time spent on processing local receipts since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static YIELD_TIMEOUTS_PROCESSED_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { + 
try_create_int_counter_vec( + "near_yield_timeouts_processed_total", + "The number of yield timeouts processed since starting this node", + &["shard_id"], + ) + .unwrap() +}); + +pub static YIELD_TIMEOUTS_PROCESSING_SECONDS_TOTAL: Lazy<CounterVec> = Lazy::new(|| { + try_create_counter_vec( + "near_yield_timeouts_processing_seconds_total", + "The time spent on processing yield timeouts since starting this node", + &["shard_id"], + ) + .unwrap() +}); + pub static TRANSACTION_PROCESSED_SUCCESSFULLY_TOTAL: Lazy<IntCounter> = Lazy::new(|| { try_create_int_counter( "near_transaction_processed_successfully_total", @@ -27,6 +102,7 @@ pub static TRANSACTION_PROCESSED_SUCCESSFULLY_TOTAL: Lazy<IntCounter> = Lazy::ne ) .unwrap() }); + pub static TRANSACTION_PROCESSED_FAILED_TOTAL: Lazy<IntCounter> = Lazy::new(|| { try_create_int_counter( "near_transaction_processed_failed_total", @@ -214,6 +290,54 @@ static CHUNK_TX_TGAS: Lazy<HistogramVec> = Lazy::new(|| { ) .unwrap() }); +pub static RECEIPT_RECORDED_SIZE: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_receipt_recorded_size", + "Size of storage proof recorded when executing a receipt", + buckets_for_receipt_storage_proof_size(), + ) + .unwrap() +}); +pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_receipt_recorded_size_upper_bound", + "Upper bound estimation (e.g. with extra size added for deletes) of storage proof size recorded when executing a receipt", + buckets_for_receipt_storage_proof_size(), + ) + .unwrap() +}); +pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_receipt_recorded_size_upper_bound_ratio", + "Ratio of upper bound to true recorded size, calculated only for sizes larger than 100KB, equal to (near_receipt_recorded_size_upper_bound / near_receipt_recorded_size)", + buckets_for_storage_proof_size_ratio(), + ) + .unwrap() +}); +pub static CHUNK_RECORDED_SIZE: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_chunk_recorded_size", + "Total size of storage proof (recorded trie nodes for state witness, post-finalization) for a single chunk", + buckets_for_chunk_storage_proof_size(), + ) + .unwrap() +}); +pub static CHUNK_RECORDED_SIZE_UPPER_BOUND: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_chunk_recorded_size_upper_bound", + "Upper bound of storage proof size (recorded trie nodes size + estimated charges, pre-finalization) for a single chunk", + buckets_for_chunk_storage_proof_size(), + ) + .unwrap() +}); +pub static CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy<Histogram> = Lazy::new(|| { + try_create_histogram_with_buckets( + "near_chunk_recorded_size_upper_bound_ratio", + "Ratio of upper bound to true storage proof size, equal to (near_chunk_recorded_size_upper_bound / near_chunk_recorded_size)", + buckets_for_storage_proof_size_ratio(), + ) + .unwrap() +}); /// Buckets used for burned gas in receipts.
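The bucket helpers referenced by these histograms are defined just below; their documented ranges can be sanity-checked by re-implementing the two generators (the near_o11y versions return `Result`-wrapped vectors, elided here):

```rust
// Hedged sketch re-implementing the bucket generators to verify the
// ranges documented below: 19 * 800 KB = 15.2 MB and 1.2^14 ≈ 12.84.
fn linear_buckets(start: f64, width: f64, count: usize) -> Vec<f64> {
    (0..count).map(|i| start + width * i as f64).collect()
}

fn exponential_buckets(start: f64, factor: f64, count: usize) -> Vec<f64> {
    (0..count).map(|i| start * factor.powi(i as i32)).collect()
}

fn main() {
    let chunk = linear_buckets(0.0, 800_000.0, 20);
    assert_eq!(chunk.last().copied(), Some(15_200_000.0)); // 15.2 MB

    let ratio = exponential_buckets(1.0, 1.2, 15);
    assert!((ratio.last().unwrap() - 12.839).abs() < 0.01); // 1.2^14
}
```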
/// @@ -238,6 +362,27 @@ fn buckets_for_compute() -> Option<Vec<f64>> { ]) } +/// Buckets from 0 to 10MB +fn buckets_for_receipt_storage_proof_size() -> Vec<f64> { + // Precise buckets for the smaller, common values + let mut buckets = vec![50_000., 100_000., 200_000., 300_000.]; + + // Coarse buckets for the larger values + buckets.extend(linear_buckets(500_000., 500_000., 20).unwrap()); + buckets +} + +/// Buckets from 0 to 15.2MB +fn buckets_for_chunk_storage_proof_size() -> Vec<f64> { + linear_buckets(0., 800_000., 20).unwrap() +} + +/// Buckets from 1 to 12.84 +fn buckets_for_storage_proof_size_ratio() -> Vec<f64> { + // 1.2 ** 14 = 12.84 + exponential_buckets(1., 1.2, 15).unwrap() +} + /// Helper struct to collect partial costs of `Runtime::apply` and reporting it /// atomically. #[derive(Debug, Default)] @@ -248,12 +393,20 @@ pub struct ApplyMetrics { tx_gas: u64, local_receipts_compute_usage: u64, local_receipts_gas: u64, + local_receipts_processed_total: u64, + local_receipts_processing_seconds_total: f64, delayed_receipts_compute_usage: u64, delayed_receipts_gas: u64, + delayed_receipts_processed_total: u64, + delayed_receipts_processing_seconds_total: f64, incoming_receipts_compute_usage: u64, incoming_receipts_gas: u64, + incoming_receipts_processed_total: u64, + incoming_receipts_processing_seconds_total: f64, yield_timeouts_compute_usage: u64, yield_timeouts_gas: u64, + yield_timeouts_processed_total: u64, + yield_timeouts_processing_seconds_total: f64, } impl ApplyMetrics { @@ -275,30 +428,97 @@ impl ApplyMetrics { self.update_accumulated(accumulated_gas, accumulated_compute); } - pub fn local_receipts_done(&mut self, accumulated_gas: u64, accumulated_compute: u64) { + pub fn local_receipts_done( + &mut self, + count: u64, + time: Duration, + accumulated_gas: u64, + accumulated_compute: u64, + ) { (self.local_receipts_gas, self.local_receipts_compute_usage) = self.update_accumulated(accumulated_gas, accumulated_compute); + self.local_receipts_processed_total += count; + self.local_receipts_processing_seconds_total += time.as_secs_f64(); } - pub fn delayed_receipts_done(&mut self, accumulated_gas: u64, accumulated_compute: u64) { + pub fn delayed_receipts_done( + &mut self, + count: u64, + time: Duration, + accumulated_gas: u64, + accumulated_compute: u64, + ) { (self.delayed_receipts_gas, self.delayed_receipts_compute_usage) = self.update_accumulated(accumulated_gas, accumulated_compute); + self.delayed_receipts_processed_total += count; + self.delayed_receipts_processing_seconds_total += time.as_secs_f64(); } - pub fn incoming_receipts_done(&mut self, accumulated_gas: u64, accumulated_compute: u64) { + pub fn incoming_receipts_done( + &mut self, + count: u64, + time: Duration, + accumulated_gas: u64, + accumulated_compute: u64, + ) { (self.incoming_receipts_gas, self.incoming_receipts_compute_usage) = self.update_accumulated(accumulated_gas, accumulated_compute); + self.incoming_receipts_processed_total += count; + self.incoming_receipts_processing_seconds_total += time.as_secs_f64(); } - pub fn yield_timeouts_done(&mut self, accumulated_gas: u64, accumulated_compute: u64) { + pub fn yield_timeouts_done( + &mut self, + count: u64, + time: Duration, + accumulated_gas: u64, + accumulated_compute: u64, + ) { (self.yield_timeouts_gas, self.yield_timeouts_compute_usage) = self.update_accumulated(accumulated_gas, accumulated_compute); + self.yield_timeouts_processed_total += count; + self.yield_timeouts_processing_seconds_total += time.as_secs_f64(); } /// Report statistics - pub fn report(&self,
shard_id: &str) { + pub fn report(&mut self, shard_id: &str) { const TERA: f64 = 1_000_000_000_000_f64; + LOCAL_RECEIPT_PROCESSED_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.local_receipts_processed_total); + self.local_receipts_processed_total = 0; + DELAYED_RECEIPT_PROCESSED_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.delayed_receipts_processed_total); + self.delayed_receipts_processed_total = 0; + INCOMING_RECEIPT_PROCESSED_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.incoming_receipts_processed_total); + self.incoming_receipts_processed_total = 0; + YIELD_TIMEOUTS_PROCESSED_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.yield_timeouts_processed_total); + self.yield_timeouts_processed_total = 0; + + LOCAL_RECEIPT_PROCESSING_SECONDS_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.local_receipts_processing_seconds_total); + self.local_receipts_processing_seconds_total = 0.0; + DELAYED_RECEIPT_PROCESSING_SECONDS_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.delayed_receipts_processing_seconds_total); + self.delayed_receipts_processing_seconds_total = 0.0; + INCOMING_RECEIPT_PROCESSING_SECONDS_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.incoming_receipts_processing_seconds_total); + self.incoming_receipts_processing_seconds_total = 0.0; + YIELD_TIMEOUTS_PROCESSING_SECONDS_TOTAL + .with_label_values(&[shard_id]) + .inc_by(self.yield_timeouts_processing_seconds_total); + self.yield_timeouts_processing_seconds_total = 0.0; + CHUNK_TX_TGAS.with_label_values(&[shard_id]).observe(self.tx_gas as f64 / TERA); CHUNK_TX_COMPUTE .with_label_values(&[shard_id]) diff --git a/runtime/runtime/src/prefetch.rs b/runtime/runtime/src/prefetch.rs index 88f0e77a135..2f7fe53fa6b 100644 --- a/runtime/runtime/src/prefetch.rs +++ b/runtime/runtime/src/prefetch.rs @@ -41,6 +41,7 @@ //! in the prefetcher. Implementation details for most limits are in //! 
`core/store/src/trie/prefetching_trie_storage.rs` +use borsh::BorshSerialize as _; use near_o11y::metrics::prometheus; use near_o11y::metrics::prometheus::core::GenericCounter; use near_primitives::receipt::{Receipt, ReceiptEnum}; @@ -50,6 +51,7 @@ use near_primitives::types::AccountId; use near_primitives::types::StateRoot; use near_store::{PrefetchApi, PrefetchError, Trie}; use sha2::Digest; +use std::str::FromStr; use tracing::{debug, warn}; use crate::metrics; @@ -63,22 +65,21 @@ pub(crate) struct TriePrefetcher { impl TriePrefetcher { pub(crate) fn new_if_enabled(trie: &Trie) -> Option<Self> { - if let Some(caching_storage) = trie.internal_get_storage_as_caching_storage() { - if let Some(prefetch_api) = caching_storage.prefetch_api().clone() { - let trie_root = *trie.get_root(); - let shard_uid = prefetch_api.shard_uid; - let metrics_labels: [&str; 1] = [&shard_uid.shard_id.to_string()]; - return Some(Self { - prefetch_api, - trie_root, - prefetch_enqueued: metrics::PREFETCH_ENQUEUED - .with_label_values(&metrics_labels), - prefetch_queue_full: metrics::PREFETCH_QUEUE_FULL - .with_label_values(&metrics_labels), - }); - } - } - None + let Some(caching_storage) = trie.internal_get_storage_as_caching_storage() else { + return None; + }; + let Some(prefetch_api) = caching_storage.prefetch_api().clone() else { + return None; + }; + let trie_root = *trie.get_root(); + let shard_uid = prefetch_api.shard_uid; + let metrics_labels: [&str; 1] = [&shard_uid.shard_id.to_string()]; + Some(Self { + prefetch_api, + trie_root, + prefetch_enqueued: metrics::PREFETCH_ENQUEUED.with_label_values(&metrics_labels), + prefetch_queue_full: metrics::PREFETCH_QUEUE_FULL.with_label_values(&metrics_labels), + }) } /// Starts prefetching data for processing the receipts. @@ -91,36 +92,102 @@ impl TriePrefetcher { receipts: &[Receipt], ) -> Result<(), PrefetchError> { for receipt in receipts.iter() { - match &receipt.receipt { + let is_refund = receipt.predecessor_id.is_system(); + let action_receipt = match &receipt.receipt { ReceiptEnum::Action(action_receipt) | ReceiptEnum::PromiseYield(action_receipt) => { - let account_id = receipt.receiver_id.clone(); + action_receipt + } + ReceiptEnum::Data(_) | ReceiptEnum::PromiseResume(_) => { + continue; + } + }; + let account_id = receipt.receiver_id.clone(); - // general-purpose account prefetching - if self.prefetch_api.enable_receipt_prefetching { - let trie_key = TrieKey::Account { account_id: account_id.clone() }; - self.prefetch_trie_key(trie_key)?; + // general-purpose account prefetching + if self.prefetch_api.enable_receipt_prefetching { + let trie_key = TrieKey::Account { account_id: account_id.clone() }; + self.prefetch_trie_key(trie_key)?; + if is_refund { + let trie_key = TrieKey::AccessKey { + account_id: account_id.clone(), + public_key: action_receipt.signer_public_key.clone(), + }; + self.prefetch_trie_key(trie_key)?; + } + for action in &action_receipt.actions { + match action { + Action::Delegate(delegate_action) => { + let trie_key = TrieKey::AccessKey { + account_id: delegate_action.delegate_action.sender_id.clone(), + public_key: delegate_action.delegate_action.public_key.clone(), + }; + self.prefetch_trie_key(trie_key)?; + } + Action::AddKey(add_key_action) => { + let trie_key = TrieKey::AccessKey { + account_id: account_id.clone(), + public_key: add_key_action.public_key.clone(), + }; + self.prefetch_trie_key(trie_key)?; + } + Action::DeleteKey(delete_key_action) => { + let trie_key = TrieKey::AccessKey { + account_id: account_id.clone(), + 
public_key: delete_key_action.public_key.clone(), + }; + self.prefetch_trie_key(trie_key)?; + } + _ => {} } + } + } - // SWEAT specific argument prefetcher - if self.prefetch_api.sweat_prefetch_receivers.contains(&account_id) - && self - .prefetch_api - .sweat_prefetch_senders - .contains(&receipt.predecessor_id) - { - for action in &action_receipt.actions { - if let Action::FunctionCall(fn_call) = action { - if fn_call.method_name == "record_batch" { - self.prefetch_sweat_record_batch( - account_id.clone(), - &fn_call.args, - )?; - } - } - } + for action in &action_receipt.actions { + let Action::FunctionCall(fn_call) = action else { + continue; + }; + if self.prefetch_api.sweat_prefetch_receivers.contains(&account_id) + && self.prefetch_api.sweat_prefetch_senders.contains(&receipt.predecessor_id) + { + if fn_call.method_name == "record_batch" { + self.prefetch_sweat_record_batch(account_id.clone(), &fn_call.args)?; + } + } + + let claim_sweat_cfg = &self.prefetch_api.claim_sweat_prefetch_config; + if fn_call.method_name == "record_batch_for_hold" { + let config = claim_sweat_cfg.iter().find(|cfg| { + cfg.receiver == account_id.as_str() + && cfg.method_name == fn_call.method_name + && cfg.sender == receipt.predecessor_id.as_str() + }); + if config.is_some() { + self.prefetch_claim_sweat_record_batch_for_hold( + account_id.clone(), + &fn_call.args, + )? + } + } + if fn_call.method_name == "claim" { + let config = claim_sweat_cfg.iter().find(|cfg| { + cfg.receiver == account_id.as_str() + && cfg.method_name == fn_call.method_name + }); + if config.is_some() { + self.prefetch_claim_sweat_claim( + account_id.clone(), + receipt.predecessor_id.clone(), + )? } } - ReceiptEnum::Data(_) | ReceiptEnum::PromiseResume(_) => {} + + if self.prefetch_api.kaiching_prefetch_config.iter().any(|cfg| { + cfg.sender == receipt.predecessor_id.as_str() + && cfg.receiver == account_id.as_str() + && cfg.method_name == fn_call.method_name + }) { + self.prefetch_kaiching(account_id.clone(), &fn_call.args)?; + } } } Ok(()) @@ -164,9 +231,13 @@ impl TriePrefetcher { /// at the same time. They share a prefetcher, so they will clean each others /// data. Handling this is a bit more involved. Failing to do so makes prefetching /// less effective in those cases but crucially nothing breaks. - pub(crate) fn clear(&self) { - self.prefetch_api.clear_queue(); + /// + /// Returns the number of prefetch requests that have been removed from the prefetch queue. + /// If this number is large, the prefetches aren't actually getting executed before cancelling. 
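The SWEAT prefetcher below rebuilds the contract's storage key for each user account before the receipt runs. A sketch of that derivation, using the `b"t\0"` prefix and SHA-256 hashing visible in the code that follows:

```rust
// Hedged sketch of the trie-key derivation used by
// `prefetch_sweat_record_batch` below: the contract keeps balances in a
// collection prefixed by "t" plus a null terminator, keyed by the
// SHA-256 of the user account id.
use sha2::{Digest, Sha256};

fn sweat_balance_storage_key(user_account: &str) -> Vec<u8> {
    let mut key = vec![0x74, 0x00]; // b"t\0" collection prefix
    key.extend(Sha256::digest(user_account.as_bytes()));
    key
}

fn main() {
    let key = sweat_balance_storage_key("alice.near");
    assert_eq!(key.len(), 2 + 32); // prefix + SHA-256 digest
}
```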
+ pub(crate) fn clear(&self) -> usize { + let ret = self.prefetch_api.clear_queue(); self.prefetch_api.clear_data(); + ret } fn prefetch_trie_key(&self, trie_key: TrieKey) -> Result<(), PrefetchError> { @@ -174,49 +245,212 @@ impl TriePrefetcher { match res { Err(PrefetchError::QueueFull) => { self.prefetch_queue_full.inc(); - debug!(target: "prefetcher", "I/O scheduler input queue is full, dropping prefetch request"); + debug!(target: "runtime::prefetch", "I/O scheduler input queue is full, dropping prefetch request"); } Err(PrefetchError::QueueDisconnected) => { // This shouldn't have happened, hence logging warning here - warn!(target: "prefetcher", "I/O scheduler input queue is disconnected, dropping prefetch request"); + warn!(target: "runtime::prefetch", "I/O scheduler input queue is disconnected, dropping prefetch request"); } Ok(()) => self.prefetch_enqueued.inc(), }; res } - /// Prefetcher specifically tuned for SWEAT record batch + /// Prefetcher tuned for SWEAT contract calls of method `record_batch`. /// - /// Temporary hack, consider removing after merging flat storage, see - /// . + /// Remove after #10965 reaches mainnet. fn prefetch_sweat_record_batch( &self, account_id: AccountId, arg: &[u8], ) -> Result<(), PrefetchError> { - if let Ok(json) = serde_json::de::from_slice::<serde_json::Value>(arg) { - if json.is_object() { - if let Some(list) = json.get("steps_batch") { - if let Some(list) = list.as_array() { - for tuple in list.iter() { - if let Some(tuple) = tuple.as_array() { - if let Some(user_account) = tuple.first().and_then(|a| a.as_str()) { - let hashed_account = - sha2::Sha256::digest(user_account.as_bytes()).into_iter(); - let mut key = vec![0x74, 0x00]; - key.extend(hashed_account); - let trie_key = TrieKey::ContractData { - account_id: account_id.clone(), - key: key.to_vec(), - }; - near_o11y::io_trace!(count: "prefetch"); - self.prefetch_trie_key(trie_key)?; - } - } - } - } - } + let Ok(json) = serde_json::de::from_slice::<serde_json::Value>(arg) else { + return Ok(()); + }; + let Some(list) = &json.get("steps_batch") else { + return Ok(()); + }; + let Some(list) = list.as_array() else { + return Ok(()); + }; + + for tuple in list.iter() { + let Some(tuple) = tuple.as_array() else { + continue; + }; + let Some(user_account) = tuple.first().and_then(|a| a.as_str()) else { + continue; + }; + let hashed_account = sha2::Sha256::digest(user_account.as_bytes()).into_iter(); + // This is a "t" string used as the unique prefix of underlying + // data structure terminated by a null value. + let mut key = vec![0x74, 0x00]; + key.extend(hashed_account); + let trie_key = + TrieKey::ContractData { account_id: account_id.clone(), key: key.to_vec() }; + near_o11y::io_trace!(count: "prefetch"); + self.prefetch_trie_key(trie_key)?; + } + Ok(()) + } + + /// Prefetcher tuned for claim.sweat::record_batch_for_hold contract calls. + /// + /// Remove after #10965 reaches mainnet. + fn prefetch_claim_sweat_record_batch_for_hold( + &self, + account_id: AccountId, + arg: &[u8], + ) -> Result<(), PrefetchError> { + let Ok(json) = serde_json::de::from_slice::<serde_json::Value>(arg) else { + return Ok(()); + }; + let Some(list) = json.get("amounts") else { + return Ok(()); + }; + let Some(list) = list.as_array() else { + return Ok(()); + }; + for tuple in list.iter() { + let Some(tuple) = tuple.as_array() else { + continue; + }; + let Some(user_account) = tuple.first().and_then(|a| a.as_str()) else { + continue; + }; + // Unique prefix of underlying data structure. 
+ let mut key = vec![0, 64, 0, 0, 0]; + key.extend(user_account.as_bytes()); + let trie_key = TrieKey::ContractData { account_id: account_id.clone(), key }; + near_o11y::io_trace!(count: "prefetch"); + self.prefetch_trie_key(trie_key)?; + } + Ok(()) + } + + /// Prefetcher tuned for claim.sweat::claim contract calls. + /// + /// Remove after #10965 reaches mainnet. + fn prefetch_claim_sweat_claim( + &self, + account_id: AccountId, + predecessor: AccountId, + ) -> Result<(), PrefetchError> { + let Self { prefetch_api, trie_root, .. } = self; + let trie_root = *trie_root; + let prefetch_api = prefetch_api.clone(); + rayon::spawn(move || { + let mut account_data_key = Vec::with_capacity(4 + 8 + predecessor.len()); + let Ok(()) = 0u8.serialize(&mut account_data_key) else { return }; + let Ok(()) = predecessor.serialize(&mut account_data_key) else { return }; + let trie_key = + TrieKey::ContractData { account_id: account_id.clone(), key: account_data_key }; + // Just read this directly for now since this is temporary anyway + let prefetcher_storage = prefetch_api.make_storage(); + let trie = Trie::new(prefetcher_storage, trie_root, None); + let Ok(Some(account_record)) = trie.get(&trie_key.to_vec()) else { + tracing::debug!( + target: "runtime::prefetch", + message = "could not load AccountRecord", + key = ?trie_key, + ); + return; + }; + #[derive(borsh::BorshDeserialize)] + #[allow(dead_code)] + struct AccountRecord { + accruals: Vec<(u32, u32)>, + is_enabled: bool, + claim_period_refreshed_at: u32, + is_locked: bool, + } + let Ok(account_record) = borsh::from_slice::<AccountRecord>(&account_record) else { + tracing::debug!( + target: "runtime::prefetch", + message = "could not decode AccountRecord", + ); + return; + }; + + for (dt, idx) in account_record.accruals { + let mut accruals_key = Vec::with_capacity(4 + 8); + // StorageKey::Accruals + let Ok(()) = 1u8.serialize(&mut accruals_key) else { continue }; + let Ok(()) = dt.serialize(&mut accruals_key) else { continue }; + let accruals_key = sha2::Sha256::digest(&accruals_key).to_vec(); + let _ = prefetch_api.prefetch_trie_key( + trie_root, + TrieKey::ContractData { account_id: account_id.clone(), key: accruals_key }, + ); + let mut amount_key = Vec::with_capacity(4 + 8 + 8); + let Ok(()) = 2u8.serialize(&mut amount_key) else { continue }; + let Ok(()) = dt.serialize(&mut amount_key) else { continue }; + amount_key.extend(&idx.to_le_bytes()); // index into Vector + let _ = prefetch_api.prefetch_trie_key( + trie_root, + TrieKey::ContractData { account_id: account_id.clone(), key: amount_key }, + ); + } + }); + Ok(()) + } + + /// Prefetcher tuned for kaiching contract calls. + /// + /// Remove after #10965 reaches mainnet. + fn prefetch_kaiching(&self, account_id: AccountId, arg: &[u8]) -> Result<(), PrefetchError> { + let Ok(json) = serde_json::de::from_slice::<serde_json::Value>(&arg) else { + return Ok(()); + }; + let Some(msg) = json.get("msg") else { + return Ok(()); + }; + let Some(json) = msg + .as_str() + .and_then(|s| serde_json::de::from_slice::<serde_json::Value>(s.as_bytes()).ok()) + else { + return Ok(()); + }; + let Some(list) = json.get("rewards") else { + return Ok(()); + }; + let Some(list) = list.as_array() else { + return Ok(()); + }; + + for tuple in list.iter() { + let Some(tuple) = tuple.as_array() else { + continue; + }; + // Unique prefix of underlying data structure. 
+ let mut user_account_key = vec![1, 109]; + let user_account_serialize_result = tuple + .get(0) + .and_then(|a| a.as_str()) + .and_then(|a| AccountId::from_str(a).ok()) + .and_then(|a| borsh::BorshSerialize::serialize(&a, &mut user_account_key).ok()); + if user_account_serialize_result.is_none() { + continue; } + let reward_id = tuple.get(2).and_then(|a| a.as_str()); + let Some(reward_id) = reward_id else { + continue; + }; + let user_account_key_hash = sha2::Sha256::digest(&user_account_key); + let trie_key = TrieKey::ContractData { + account_id: account_id.clone(), + key: user_account_key_hash.to_vec(), + }; + near_o11y::io_trace!(count: "prefetch"); + self.prefetch_trie_key(trie_key)?; + + // Unique prefix of underlying data structure. + let mut reward_key = vec![0, 24, 0, 0, 0]; + reward_key.extend(reward_id.as_bytes()); + let trie_key = + TrieKey::ContractData { account_id: account_id.clone(), key: reward_key }; + near_o11y::io_trace!(count: "prefetch"); + self.prefetch_trie_key(trie_key)?; } Ok(()) } diff --git a/runtime/runtime/src/verifier.rs b/runtime/runtime/src/verifier.rs index 57c0bdd4b5a..2c4647e80e3 100644 --- a/runtime/runtime/src/verifier.rs +++ b/runtime/runtime/src/verifier.rs @@ -128,7 +128,7 @@ pub fn validate_transaction( let sender_is_receiver = &transaction.receiver_id == signer_id; - tx_cost(&config, transaction, gas_price, sender_is_receiver) + tx_cost(&config, transaction, gas_price, sender_is_receiver, current_protocol_version) .map_err(|_| InvalidTxError::CostOverflow.into()) } diff --git a/scripts/nayduck.py b/scripts/nayduck.py index 7886e0f3355..f9d1bfaf430 100755 --- a/scripts/nayduck.py +++ b/scripts/nayduck.py @@ -7,13 +7,13 @@ --branch \ --test-file .txt -Scheduled runs can be seen at <https://nayduck.near.org/>. +Scheduled runs can be seen at <https://nayduck.nearone.org/>. See README.md in nightly directory for documentation of the test suite file format. Note that you must be a member of the Near or Near Protocol organisation on GitHub to authenticate (). -The source code for NayDuck itself is at <https://github.com/near/nayduck>. +The source code for NayDuck itself is at <https://github.com/Near-One/nayduck>. """ import getpass @@ -28,7 +28,7 @@ REPO_DIR = pathlib.Path(__file__).resolve().parents[1] DEFAULT_TEST_FILE = 'nightly/nightly.txt' -NAYDUCK_BASE_HREF = 'https://nayduck.near.org' +NAYDUCK_BASE_HREF = 'https://nayduck.nearone.org' def _parse_args(): @@ -231,7 +231,7 @@ def _parse_timeout(timeout: typing.Optional[str]) -> typing.Optional[int]: def run_locally(args, tests): for test in tests: - # See nayduck specs at https://github.com/near/nayduck/blob/master/lib/testspec.py + # See nayduck specs at https://github.com/Near-One/nayduck/blob/master/lib/testspec.py fields = test.split() timeout = None diff --git a/tools/cold-store/src/cli.rs b/tools/cold-store/src/cli.rs index d33fef5c3b8..2904c3403c5 100644 --- a/tools/cold-store/src/cli.rs +++ b/tools/cold-store/src/cli.rs @@ -53,6 +53,9 @@ enum SubCommand { /// You can provide maximum depth and/or maximum number of vertices to traverse for each root. /// Trie is traversed using DFS with randomly shuffled kids for every node. CheckStateRoot(CheckStateRootCmd), + /// Modifies the cold db from the config so that it is considered not initialised. + /// Doesn't actually delete any data, except for HEAD and COLD_HEAD in BlockMisc. 
+ ResetCold(ResetColdCmd), } impl ColdStoreCommand { @@ -87,6 +90,7 @@ impl ColdStoreCommand { } SubCommand::PrepareHot(cmd) => cmd.run(&storage, &home_dir, &near_config), SubCommand::CheckStateRoot(cmd) => cmd.run(&storage), + SubCommand::ResetCold(cmd) => cmd.run(&storage), } } @@ -652,3 +656,20 @@ impl CheckStateRootCmd { store.get(DBCol::State, &cold_state_key) } } + +#[derive(clap::Args)] +struct ResetColdCmd {} + +impl ResetColdCmd { + pub fn run(self, storage: &NodeStorage) -> anyhow::Result<()> { + let cold_store = storage + .get_cold_store() + .ok_or_else(|| anyhow::anyhow!("Cold storage is not configured"))?; + + let mut store_update = cold_store.store_update(); + store_update.delete(DBCol::BlockMisc, HEAD_KEY); + store_update.delete(DBCol::BlockMisc, COLD_HEAD_KEY); + store_update.commit()?; + Ok(()) + } +} diff --git a/tools/congestion-model/Cargo.toml b/tools/congestion-model/Cargo.toml index 9f6555b7d55..3a9c5b817b0 100644 --- a/tools/congestion-model/Cargo.toml +++ b/tools/congestion-model/Cargo.toml @@ -13,6 +13,8 @@ bytesize.workspace = true chrono.workspace = true clap = { workspace = true, features = ["derive"] } csv.workspace = true +tracing.workspace = true +tracing-subscriber.workspace = true [lints] workspace = true diff --git a/tools/congestion-model/run_eval.sh b/tools/congestion-model/run_eval.sh new file mode 100644 index 00000000000..3bd8f5e6cb5 --- /dev/null +++ b/tools/congestion-model/run_eval.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +# exit on error +set -euo pipefail + +# show each executed command +# set -x + +NUM_SHARDS=6 +WARMUP=500 +ROUNDS=1500 +TX_POOL_SIZE=200 + +OPTIONS_STRING="--rounds ${ROUNDS} --warmup ${WARMUP} --shards ${NUM_SHARDS} --tx-pool-size ${TX_POOL_SIZE}" + +echo "# Delay VS Utilization" +cargo run -q --release -- --workload RelayedHot --strategy SmoothTrafficLight ${OPTIONS_STRING} +cargo run -q --release -- --workload RelayedHot --strategy NoQueues ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload RelayedHot --strategy STL_MAX_UTIL ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload RelayedHot --strategy STL_LOW_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload RelayedHot --strategy STL_MIN_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload RelayedHot --strategy NEPv3 ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload RelayedHot --strategy FancyStop ${OPTIONS_STRING} | tail -n 1 + +echo "# Big queues VS Utilization" +cargo run -q --release -- --workload BigLinearImbalance --strategy SmoothTrafficLight ${OPTIONS_STRING} +cargo run -q --release -- --workload BigLinearImbalance --strategy NoQueues ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload BigLinearImbalance --strategy STL_MAX_UTIL ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload BigLinearImbalance --strategy STL_LOW_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload BigLinearImbalance --strategy STL_MIN_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload BigLinearImbalance --strategy NEPv3 ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload BigLinearImbalance --strategy FancyStop ${OPTIONS_STRING} | tail -n 1 + +echo "# Simple Utilization" +cargo run -q --release -- --workload Balanced --strategy SmoothTrafficLight ${OPTIONS_STRING} +cargo run -q --release -- --workload Balanced --strategy NoQueues ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload Balanced --strategy 
STL_MAX_UTIL ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload Balanced --strategy STL_LOW_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload Balanced --strategy STL_MIN_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload Balanced --strategy NEPv3 ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload Balanced --strategy FancyStop ${OPTIONS_STRING} | tail -n 1 + +echo "# Fairness and Prioritization opportunity" +cargo run -q --release -- --workload FairnessTest --strategy SmoothTrafficLight ${OPTIONS_STRING} +cargo run -q --release -- --workload FairnessTest --strategy NoQueues ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload FairnessTest --strategy STL_MAX_UTIL ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload FairnessTest --strategy STL_LOW_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload FairnessTest --strategy STL_MIN_DELAY ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload FairnessTest --strategy NEPv3 ${OPTIONS_STRING} | tail -n 1 +cargo run -q --release -- --workload FairnessTest --strategy FancyStop ${OPTIONS_STRING} | tail -n 1 + + +# Use this code to run all combos. But I usually find it better to control each group separately. +# +# STRATEGIES=("SmoothTrafficLight" "NoQueues" "STL_MAX_UTIL" "STL_LOW_DELAY" "STL_MIN_DELAY" "NEPv3" "FancyStop") +# WORKLOADS=("RelayedHot" "Balanced" "BigLinearImbalance" "FairnessTest") +# +# for WORKLOAD in "${WORKLOADS[@]}" +# do +# for STRATEGY in "${STRATEGIES[@]}" +# do +# cargo run -q --release -- --workload ${WORKLOAD} --strategy ${STRATEGY} ${OPTIONS_STRING} | tail -n 1 +# done; +# done diff --git a/tools/congestion-model/src/evaluation/mod.rs b/tools/congestion-model/src/evaluation/mod.rs index 9160cca02e7..5f0e33e2014 100644 --- a/tools/congestion-model/src/evaluation/mod.rs +++ b/tools/congestion-model/src/evaluation/mod.rs @@ -125,3 +125,19 @@ impl Model { stats_writer.write_record(None::<&[u8]>).unwrap(); } } + +impl std::ops::Sub for GasThroughput { + type Output = GasThroughput; + + fn sub(self, rhs: Self) -> Self::Output { + Self { total: self.total - rhs.total } + } +} + +impl std::ops::Div<usize> for GasThroughput { + type Output = GasThroughput; + + fn div(self, rhs: usize) -> Self::Output { + Self { total: self.total / rhs as u64 } + } +} diff --git a/tools/congestion-model/src/evaluation/summary_table.rs b/tools/congestion-model/src/evaluation/summary_table.rs index b2618247d66..5d23ae4d55a 100644 --- a/tools/congestion-model/src/evaluation/summary_table.rs +++ b/tools/congestion-model/src/evaluation/summary_table.rs @@ -1,14 +1,15 @@ use super::{GasThroughput, Progress, ShardQueueLengths, UserExperience}; -use crate::PGAS; +use crate::{PGAS, TGAS}; pub fn print_summary_header() { println!( - "{:<25}{:<25}{:>25}{:>25}{:>16}{:>16}{:>16}{:>16}", + "{:<25}{:<25}{:>25}{:>25}{:>16}{:>16}{:>16}{:>16}{:>16}", "WORKLOAD", "STRATEGY", - "BURNT GAS", + "BURNT GAS PER CHUNK", "TRANSACTIONS FINISHED", "MEDIAN TX DELAY", + "90p TX DELAY", "MAX QUEUE LEN", "MAX QUEUE SIZE", "MAX QUEUE PGAS", @@ -24,10 +25,11 @@ pub fn print_summary_row( user_experience: &UserExperience, ) { println!( - "{workload:<25}{strategy:<25}{:>20} PGas{:>25}{:>16}{:>16}{:>16}{:>16}", - throughput.total / PGAS, + "{workload:<25}{strategy:<25}{:>20} TGas{:>25}{:>16}{:>16}{:>16}{:>16}{:>16}", + throughput.total / TGAS, progress.finished_transactions, user_experience.successful_tx_delay_median, + 
user_experience.successful_tx_delay_90th_percentile, max_queues.queued_receipts.num, bytesize::ByteSize::b(max_queues.queued_receipts.size), max_queues.queued_receipts.gas / PGAS, diff --git a/tools/congestion-model/src/main.rs b/tools/congestion-model/src/main.rs index 3377967d4b8..e4d6485d36c 100644 --- a/tools/congestion-model/src/main.rs +++ b/tools/congestion-model/src/main.rs @@ -1,16 +1,20 @@ +use bytesize::ByteSize; use chrono::Utc; use clap::Parser; use congestion_model::strategy::{ - FancyGlobalTransactionStop, GlobalTxStopShard, NewTxLast, NoQueueShard, SimpleBackpressure, - TrafficLight, + FancyGlobalTransactionStop, GlobalTxStopShard, NepStrategy, NewTxLast, NoQueueShard, + SimpleBackpressure, SmoothTrafficLight, TrafficLight, }; use congestion_model::workload::{ - AllForOneProducer, BalancedProducer, LinearImbalanceProducer, Producer, + AllForOneProducer, BalancedProducer, FairnessBenchmarkProducer, LinearImbalanceProducer, + Producer, }; use congestion_model::{ - summary_table, CongestionStrategy, Model, ShardQueueLengths, StatsWriter, PGAS, + summary_table, CongestionStrategy, Model, ShardQueueLengths, StatsWriter, PGAS, TGAS, }; use std::time::Duration; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::{self, Layer}; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] @@ -20,6 +24,11 @@ struct Args { #[clap(short, long, default_value = "1000")] rounds: usize, + /// Warmup rounds do not count towards total gas in the summary table. CSV + /// writer output is not affected. + #[clap(short, long, default_value_t = 0)] + warmup: usize, + /// Can be used to select a single workload or "all" to run all workload. /// It's case insensitive and spaces are stripped. /// Example: "all", "balanced", "all to one", "AllToOne". @@ -43,11 +52,21 @@ struct Args { /// other. #[clap(long)] write_stats_filepath: Option, + /// At most N transactions can stay in the transaction pool and the remainder is rejected. + /// + /// This can be useful to look at transaction delays. + #[clap(long, default_value_t = usize::MAX)] + tx_pool_size: usize, } fn main() { let args = Args::parse(); + let filter = tracing_subscriber::EnvFilter::from_default_env(); + let layer = tracing_subscriber::fmt::layer().with_filter(filter); + let subscriber = tracing_subscriber::registry().with(layer); + tracing::subscriber::set_global_default(subscriber).expect("could not set a global subscriber"); + summary_table::print_summary_header(); let workload_names = parse_workload_names(args.workload.as_ref()); @@ -68,7 +87,15 @@ fn main() { strategy_name, ); - run_model(&strategy_name, &workload_name, args.shards, args.rounds, stats_writer); + run_model( + &strategy_name, + &workload_name, + args.shards, + args.rounds, + args.warmup, + stats_writer, + args.tx_pool_size, + ); } } } @@ -94,7 +121,9 @@ fn run_model( workload_name: &str, num_shards: usize, num_rounds: usize, + num_warmup_rounds: usize, mut stats_writer: StatsWriter, + tx_pool_size: usize, ) { let strategy = strategy(strategy_name, num_shards); let workload = workload(workload_name); @@ -106,19 +135,26 @@ fn run_model( // looking at a maximum of 1800 rounds, beyond that you'll need to customize // the grafana time range. 
     let start_time = Utc::now() - Duration::from_secs(1 * 60 * 60);
+    let mut warmup_gas_usage = model.gas_throughput();
     model.write_stats_header(&mut stats_writer);
 
     for round in 0..num_rounds {
+        if round == num_warmup_rounds {
+            warmup_gas_usage = model.gas_throughput();
+        }
         model.write_stats_values(&mut stats_writer, start_time, round);
         model.step();
+        model.trim_transaction_pools(tx_pool_size);
         max_queues = max_queues.max_component_wise(&model.max_queue_length());
     }
 
     summary_table::print_summary_row(
         workload_name,
         strategy_name,
         &model.progress(),
-        &model.gas_throughput(),
+        &((model.gas_throughput() - warmup_gas_usage)
+            / (num_rounds - num_warmup_rounds)
+            / num_shards),
         &max_queues,
         &model.user_experience(),
     );
@@ -144,10 +180,15 @@ fn workload(workload_name: &str) -> Box<dyn Producer> {
             // Each shard transforms one local tx into 4^3 = 64 receipts of 100kB to another shard
             Box::new(BalancedProducer::with_sizes_and_fan_out(vec![100, 100, 100, 100_000], 4))
         }
-        "All To One" => Box::new(AllForOneProducer::one_hop_only()),
-        "Indirect All To One" => Box::<AllForOneProducer>::default(),
+        "Mixed All To One" => Box::<AllForOneProducer>::default(),
+        "Indirect All To One" => Box::new(AllForOneProducer::new(false, true, true)),
+        "One Hop All To One" => Box::new(AllForOneProducer::new(true, false, false)),
+        "Two Hop All To One" => Box::new(AllForOneProducer::new(false, true, false)),
+        "Three Hop All To One" => Box::new(AllForOneProducer::new(false, false, true)),
+        "Relayed Hot" => Box::new(AllForOneProducer::hot_tg()),
         "Linear Imbalance" => Box::<LinearImbalanceProducer>::default(),
         "Big Linear Imbalance" => Box::new(LinearImbalanceProducer::big_receipts()),
+        "Fairness Test" => Box::<FairnessBenchmarkProducer>::default(),
         _ => panic!("unknown workload: {}", workload_name),
     }
 }
@@ -161,9 +202,116 @@ fn strategy(strategy_name: &str, num_shards: usize) -> Vec<Box<dyn CongestionStrategy>> {
         "No queues" => Box::new(NoQueueShard {}) as Box<dyn CongestionStrategy>,
         "Global TX stop" => Box::<GlobalTxStopShard>::default(),
         "Simple backpressure" => Box::<SimpleBackpressure>::default(),
-        "Fancy Global Transaction Stop" => Box::<FancyGlobalTransactionStop>::default(),
+        "Fancy Stop" => Box::<FancyGlobalTransactionStop>::default(),
         "New TX last" => Box::<NewTxLast>::default(),
         "Traffic Light" => Box::<TrafficLight>::default(),
+        "Smooth Traffic Light" => Box::<SmoothTrafficLight>::default(),
+        // Trade essentially unbounded outgoing delays for higher
+        // utilization. If run for long enough, it will still fill the
+        // buffer and hit a memory limit, but full throughput can be
+        // sustained for a long time this way.
+        "STL_MAX_UTIL" => Box::new(
+            SmoothTrafficLight::default()
+                .with_smooth_slow_down(false)
+                .with_gas_limits(50 * PGAS, u64::MAX)
+                .with_tx_reject_threshold(0.125),
+        ),
+        "STL_HIGH_UTIL" => Box::new(
+            SmoothTrafficLight::default()
+                .with_smooth_slow_down(false)
+                .with_gas_limits(50 * PGAS, 50 * PGAS)
+                .with_tx_reject_threshold(0.125),
+        ),
+        // Keep queues short enough that they can be emptied in one round.
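+        // For example (illustrative reading of the parameters, assuming the
+        // model's GAS_LIMIT is on the order of 1 PGas): with the
+        // `with_gas_limits(1300 * TGAS, 1 * PGAS)` call below, incoming
+        // congestion reaches 100% once roughly 1300 TGas of receipts are
+        // queued, which is about one chunk's worth of work, so the backlog
+        // should clear within about one round.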
+ "STL_MIN_DELAY" => Box::new( + SmoothTrafficLight::default() + .with_gas_limits(1300 * TGAS, 1 * PGAS) + .with_tx_reject_threshold(0.95), + ), + "STL_LOW_DELAY" => Box::new( + SmoothTrafficLight::default() + .with_gas_limits(5 * PGAS, 1 * PGAS) + .with_tx_reject_threshold(0.5), + ), + "NEP" => Box::::default(), + "NEP 200MB" => Box::new( + NepStrategy::default().with_memory_limits(ByteSize::mb(100), ByteSize::mb(100)), + ), + "NEP 450/50MB" => Box::new( + // keep outgoing limit small + // (1) if we hit this, it's due to another shard's incoming congestion, + // so we are already in a second stage of congestion and should be more aggressive + // (2) this soft limit will be breached quite a bit anyway + // as we don't stop executing receipts + NepStrategy::default().with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)), + ), + "NEP 1GB" => Box::new( + NepStrategy::default().with_memory_limits(ByteSize::mb(500), ByteSize::mb(500)), + ), + "NEP 10 Pgas" => Box::new(NepStrategy::default().with_gas_limits(10 * PGAS, 10 * PGAS)), + "NEP 1 Pgas" => Box::new(NepStrategy::default().with_gas_limits(10 * PGAS, 10 * PGAS)), + "NEP 10/1 Pgas" => { + Box::new(NepStrategy::default().with_gas_limits(10 * PGAS, 1 * PGAS)) + } + // NEP v2 takes results from memory and gas limits into account and fixes those + "NEPv2" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)), + ), + "NEPv2 1GB" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(900), ByteSize::mb(100)), + ), + "NEPv2 early global stop" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_global_stop_limit(0.5), + ), + "NEPv2 late global stop" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_global_stop_limit(1.0), + ), + "NEPv2 less forwarding" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_send_gas_limit_range(PGAS / 2, 2 * PGAS), + ), + "NEPv2 more forwarding" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_send_gas_limit_range(PGAS / 2, 100 * PGAS), + ), + "NEPv2 less tx" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_tx_gas_limit_range(0, 100 * TGAS), + ), + "NEPv2 more tx" => Box::new( + NepStrategy::default() + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50)) + .with_tx_gas_limit_range(5 * TGAS, 900 * TGAS), + ), + // NEP v3 takes results from v2 into account, and lots of further fine-tuning. + // Unfortunately, no configuration can pass the fairness test in a satisfying way. 
+ "NEPv3" => Box::new( + NepStrategy::default() + // small outgoing buffers is great for low latency + .with_gas_limits(10 * PGAS, 1 * PGAS) + .with_memory_limits(ByteSize::mb(500), ByteSize::mb(50)) + // going to zero is generally better in this strategy + .with_tx_gas_limit_range(0, 500 * TGAS) + .with_send_gas_limit_range(0, 5 * PGAS) + .with_global_stop_limit(0.95), + ), _ => panic!("unknown strategy: {}", strategy_name), }; @@ -178,10 +326,15 @@ fn parse_workload_names(workload_name: &str) -> Vec { "Increasing Size".to_string(), "Extreme Increasing Size".to_string(), "Shard War".to_string(), - "All To One".to_string(), + "Mixed All To One".to_string(), "Indirect All To One".to_string(), + "One Hop All To One".to_string(), + "Two Hop All To One".to_string(), + "Three Hop All To One".to_string(), + "Relayed Hot".to_string(), "Linear Imbalance".to_string(), "Big Linear Imbalance".to_string(), + "Fairness Test".to_string(), ]; if workload_name == "all" { @@ -201,9 +354,30 @@ fn parse_strategy_names(strategy_name: &str) -> Vec { "No queues".to_string(), "Global TX stop".to_string(), "Simple backpressure".to_string(), - "Fancy Global Transaction Stop".to_string(), + "Fancy Stop".to_string(), "New TX last".to_string(), "Traffic Light".to_string(), + "Smooth Traffic Light".to_string(), + "STL_MAX_UTIL".to_string(), + "STL_HIGH_UTIL".to_string(), + "STL_MIN_DELAY".to_string(), + "STL_LOW_DELAY".to_string(), + "NEP".to_string(), + "NEP 200MB".to_string(), + "NEP 450/50MB".to_string(), + "NEP 1GB".to_string(), + "NEP 10 Pgas".to_string(), + "NEP 1 Pgas".to_string(), + "NEP 10/1 Pgas".to_string(), + "NEPv2".to_string(), + "NEPv2 1GB".to_string(), + "NEPv2 early global stop".to_string(), + "NEPv2 late global stop".to_string(), + "NEPv2 less forwarding".to_string(), + "NEPv2 more forwarding".to_string(), + "NEPv2 less tx".to_string(), + "NEPv2 more tx".to_string(), + "NEPv3".to_string(), ]; if strategy_name == "all" { diff --git a/tools/congestion-model/src/model/mod.rs b/tools/congestion-model/src/model/mod.rs index 9df91999615..7e5b7d4f55f 100644 --- a/tools/congestion-model/src/model/mod.rs +++ b/tools/congestion-model/src/model/mod.rs @@ -124,6 +124,15 @@ impl Model { .collect() } + pub fn trim_transaction_pools(&mut self, max_len: usize) { + for &shard_id in &self.shard_ids { + let len = self.queues.incoming_transactions(shard_id).len(); + if len > max_len { + self.queues.incoming_transactions_mut(shard_id).drain(0..len - max_len); + } + } + } + pub fn shard(&mut self, id: ShardId) -> &mut dyn CongestionStrategy { self.shards[id.0].as_mut() } diff --git a/tools/congestion-model/src/strategy/mod.rs b/tools/congestion-model/src/strategy/mod.rs index 0bdae5ab9f8..aced6a730ab 100644 --- a/tools/congestion-model/src/strategy/mod.rs +++ b/tools/congestion-model/src/strategy/mod.rs @@ -3,16 +3,20 @@ use crate::{QueueId, ShardId}; pub use fancy_global_transaction_stop::FancyGlobalTransactionStop; pub use global_tx_stop::GlobalTxStopShard; +pub use nep::NepStrategy; pub use new_tx_last::NewTxLast; pub use no_queues::NoQueueShard; pub use simple_backpressure::SimpleBackpressure; +pub use smooth_traffic_light::SmoothTrafficLight; pub use traffic_light::TrafficLight; mod fancy_global_transaction_stop; mod global_tx_stop; +mod nep; mod new_tx_last; mod no_queues; mod simple_backpressure; +mod smooth_traffic_light; mod traffic_light; /// Implement the shard behavior to define a new congestion control strategy. 
diff --git a/tools/congestion-model/src/strategy/nep.rs b/tools/congestion-model/src/strategy/nep.rs
new file mode 100644
index 00000000000..74b05fd6c41
--- /dev/null
+++ b/tools/congestion-model/src/strategy/nep.rs
@@ -0,0 +1,361 @@
+use std::collections::BTreeMap;
+
+use crate::model::ChunkExecutionContext;
+use crate::strategy::QueueFactory;
+use crate::{GGas, QueueId, Receipt, ShardId, TransactionId, GAS_LIMIT, PGAS, TGAS};
+
+pub struct NepStrategy {
+    pub shard_id: Option<ShardId>,
+    pub all_shards: Vec<ShardId>,
+    pub other_shards: Vec<ShardId>,
+
+    // The queues for receipts going to other shards.
+    pub outgoing_queues: BTreeMap<ShardId, QueueId>,
+
+    // How much gas are we allowed to send to other shards.
+    pub outgoing_gas_limit: BTreeMap<ShardId, GGas>,
+
+    // numbers to fine-tune
+    pub min_tx_gas: GGas,
+    pub max_tx_gas: GGas,
+    pub min_send_limit: GGas,
+    pub max_send_limit: GGas,
+    pub global_outgoing_congestion_limit: f64,
+    pub max_incoming_gas: GGas,
+    pub max_incoming_congestion_memory: u64,
+    pub max_outgoing_congestion_memory: u64,
+    pub max_outgoing_gas: GGas,
+}
+
+impl Default for NepStrategy {
+    fn default() -> Self {
+        Self {
+            // parameters which can be set by the model runner
+            min_tx_gas: 5 * TGAS,
+            max_tx_gas: 500 * TGAS,
+            min_send_limit: 0,
+            max_send_limit: 30 * PGAS,
+            global_outgoing_congestion_limit: 0.9,
+            max_incoming_gas: 100 * PGAS,
+            max_outgoing_gas: 100 * PGAS,
+            max_incoming_congestion_memory: 250_000_000,
+            max_outgoing_congestion_memory: 250_000_000,
+
+            // init fills these
+            shard_id: Default::default(),
+            all_shards: Default::default(),
+            other_shards: Default::default(),
+            outgoing_queues: Default::default(),
+            outgoing_gas_limit: Default::default(),
+        }
+    }
+}
+
+#[derive(Default, Clone)]
+struct CongestedShardsInfo {
+    incoming_congestion: f64,
+    outgoing_congestion: f64,
+}
+
+impl crate::CongestionStrategy for NepStrategy {
+    fn init(
+        &mut self,
+        id: crate::ShardId,
+        all_shards: &[crate::ShardId],
+        queue_factory: &mut dyn QueueFactory,
+    ) {
+        self.shard_id = Some(id);
+        self.all_shards = all_shards.to_vec();
+        self.other_shards = all_shards.iter().map(|s| *s).filter(|s| *s != id).collect();
+
+        for shard_id in &self.other_shards {
+            let name = format!("outgoing_receipts_{}", shard_id);
+            let queue = queue_factory.register_queue(id, &name);
+            self.outgoing_queues.insert(*shard_id, queue);
+        }
+    }
+
+    fn compute_chunk(&mut self, ctx: &mut ChunkExecutionContext) {
+        self.init_send_limit(ctx);
+
+        self.process_outgoing_receipts(ctx);
+
+        self.process_new_transactions(ctx);
+
+        self.process_incoming_receipts(ctx);
+
+        self.update_block_info(ctx);
+    }
+}
+
+impl NepStrategy {
+    // Step 1: Compute bandwidth limits to other shards based on the congestion information
+    fn init_send_limit(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        self.outgoing_gas_limit.clear();
+
+        for shard_id in self.other_shards.clone() {
+            let CongestedShardsInfo { incoming_congestion, .. } = self.get_info(ctx, &shard_id);
+            let send_limit = mix(self.max_send_limit, self.min_send_limit, incoming_congestion);
+
+            self.outgoing_gas_limit.insert(shard_id, send_limit);
+        }
+    }
+
+    // Step 2: Drain receipts in the outgoing buffer from the previous round
+    //
+    // Goes through buffered outgoing receipts and sends as many as possible up
+    // to the send limit for each shard. Updates the send limit for every sent receipt.
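+    //
+    // For example (illustrative numbers): with a send limit of 3 PGas and a
+    // queue holding receipts with 2, 2, and 1 PGas attached, only the first
+    // receipt is forwarded; the second no longer fits, and because the queue
+    // is drained strictly in FIFO order, the 1 PGas receipt behind it has to
+    // wait as well.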
+    fn process_outgoing_receipts(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        for (other_shard_id, queue_id) in &self.outgoing_queues {
+            let send_limit = self.outgoing_gas_limit.get_mut(other_shard_id).unwrap();
+
+            loop {
+                let Some(receipt) = ctx.queue(*queue_id).front() else {
+                    break;
+                };
+
+                if receipt.attached_gas > *send_limit {
+                    break;
+                }
+
+                let receipt = ctx.queue(*queue_id).pop_front().unwrap();
+                *send_limit -= receipt.attached_gas;
+
+                ctx.forward_receipt(receipt);
+            }
+        }
+    }
+
+    // Step 3: Convert all transactions to receipts included in the chunk
+    //
+    // * limit the gas for new transactions based on our own incoming congestion
+    // * filter transactions to a shard based on the receiver's memory congestion
+    //
+    // The outgoing receipts are processed as in `process_outgoing_receipts`.
+    fn process_new_transactions(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        let incoming_congestion = self.get_incoming_congestion(ctx);
+        let tx_limit = mix(self.max_tx_gas, self.min_tx_gas, incoming_congestion);
+
+        while ctx.gas_burnt() < tx_limit {
+            let Some(tx) = ctx.incoming_transactions().pop_front() else {
+                // no more transactions to process
+                break;
+            };
+
+            if self.get_global_stop(ctx) {
+                break;
+            }
+
+            if self.get_filter_stop(ctx, tx) {
+                // reject transaction
+                continue;
+            }
+
+            let outgoing = ctx.accept_transaction(tx);
+            self.forward_or_buffer(ctx, outgoing);
+        }
+    }
+
+    // Checks if any of the shards are congested to the point where all shards
+    // should stop accepting any transactions.
+    //
+    // TODO consider smooth slow down
+    fn get_global_stop(&mut self, ctx: &mut ChunkExecutionContext<'_>) -> bool {
+        for shard_id in self.all_shards.clone() {
+            let CongestedShardsInfo { outgoing_congestion, .. } = self.get_info(ctx, &shard_id);
+            if outgoing_congestion > self.global_outgoing_congestion_limit {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Checks if the transaction receiver is in a congested shard. If so the
+    // transaction should be rejected.
+    //
+    // TODO consider smooth slow down
+    fn get_filter_stop(&mut self, ctx: &mut ChunkExecutionContext<'_>, tx: TransactionId) -> bool {
+        let filter_outgoing_congestion_limit = 0.5;
+
+        let receiver = ctx.tx_receiver(tx);
+        // Note: I also tried using the incoming congestion for the filter stop instead.
+        // Positive
+        //     The fairness test utilization is about 2x better. (depends on
+        //     strategy parameters and model config)
+        // Negative
+        //     In the available workloads, it leads to larger queues when big
+        //     receipts are involved and slightly worse 90th percentile delays in
+        //     all-to-one workloads.
+        //     Also, the linear imbalance workloads lose about 50% utilization.
+        let CongestedShardsInfo { outgoing_congestion, .. } = self.get_info(ctx, &receiver);
+        outgoing_congestion > filter_outgoing_congestion_limit
+    }
+
+    // Step 4: Execute receipts in the order of local, delayed, incoming
+    // Step 5: Remaining local or incoming receipts are added to the end of the
+    // delayed receipts queue
+    //
+    // In the model there is no distinction between local, delayed and incoming.
+    // All of those are stored in the incoming queue so we just process that.
+    //
+    // Always process as many receipts as allowed by the GAS_LIMIT.
+    //
+    // The outgoing receipts are processed as in `process_outgoing_receipts`.
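+    //
+    // Note that the loop below checks the limit before popping, so the last
+    // receipt may overshoot GAS_LIMIT: e.g. with a hypothetical GAS_LIMIT of
+    // 1000 TGas and receipts burning 300 TGas each, four receipts execute
+    // (900 < 1000, so a fourth one still starts and ends the round at
+    // 1200 TGas burnt).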
+    fn process_incoming_receipts(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        while ctx.gas_burnt() < GAS_LIMIT {
+            let Some(receipt) = ctx.incoming_receipts().pop_front() else {
+                break;
+            };
+
+            let outgoing = ctx.execute_receipt(receipt);
+            for receipt in outgoing {
+                self.forward_or_buffer(ctx, receipt);
+            }
+        }
+    }
+
+    // Step 6: Compute own congestion information for the next block
+    fn update_block_info(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        let incoming_congestion = self.get_incoming_congestion(ctx);
+        let outgoing_congestion = self.get_outgoing_congestion(ctx);
+
+        tracing::debug!(
+            target: "model",
+            shard_id=?self.shard_id(),
+            incoming_congestion=format!("{incoming_congestion:.2}"),
+            outgoing_congestion=format!("{outgoing_congestion:.2}"),
+            "chunk info"
+        );
+
+        let info = CongestedShardsInfo { incoming_congestion, outgoing_congestion };
+        ctx.current_block_info().insert(info);
+    }
+
+    fn get_incoming_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        f64::max(self.incoming_memory_congestion(ctx), self.incoming_gas_congestion(ctx))
+    }
+
+    fn incoming_memory_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let memory_consumption = ctx.incoming_receipts().size();
+        let memory_congestion =
+            memory_consumption as f64 / self.max_incoming_congestion_memory as f64;
+        f64::clamp(memory_congestion, 0.0, 1.0)
+    }
+
+    fn incoming_gas_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let gas_backlog = ctx.incoming_receipts().attached_gas() as f64;
+        f64::clamp(gas_backlog / self.max_incoming_gas as f64, 0.0, 1.0)
+    }
+
+    fn get_outgoing_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        f64::max(self.outgoing_memory_congestion(ctx), self.outgoing_gas_congestion(ctx))
+    }
+
+    fn outgoing_memory_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let mut memory_consumption = 0;
+        for (_, queue_id) in &self.outgoing_queues {
+            memory_consumption += ctx.queue(*queue_id).size();
+        }
+
+        let memory_congestion =
+            memory_consumption as f64 / self.max_outgoing_congestion_memory as f64;
+        f64::clamp(memory_congestion, 0.0, 1.0)
+    }
+
+    fn outgoing_gas_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let mut gas_backlog = 0;
+        for (_, queue_id) in &self.outgoing_queues {
+            gas_backlog += ctx.queue(*queue_id).attached_gas();
+        }
+
+        let gas_congestion = gas_backlog as f64 / self.max_outgoing_gas as f64;
+        f64::clamp(gas_congestion, 0.0, 1.0)
+    }
+
+    // Forward or buffer a receipt.
+    // Local receipts are always forwarded.
+    fn forward_or_buffer(&mut self, ctx: &mut ChunkExecutionContext<'_>, receipt: Receipt) {
+        let shard_id = receipt.receiver;
+
+        // If we are the receiver just forward the receipt.
+        if shard_id == self.shard_id() {
+            ctx.forward_receipt(receipt);
+            return;
+        }
+
+        let send_limit = self.outgoing_gas_limit.get_mut(&shard_id).unwrap();
+        if receipt.attached_gas > *send_limit {
+            ctx.queue(self.outgoing_queues[&shard_id]).push_back(receipt);
+            return;
+        }
+
+        *send_limit -= receipt.attached_gas;
+        ctx.forward_receipt(receipt);
+    }
+
+    fn get_info(
+        &mut self,
+        ctx: &mut ChunkExecutionContext<'_>,
+        shard_id: &ShardId,
+    ) -> CongestedShardsInfo {
+        let Some(info) = ctx.prev_block_info().get(&shard_id) else {
+            // If there is no info assume there is no congestion.
+            return CongestedShardsInfo::default();
+        };
+        info.get::<CongestedShardsInfo>().unwrap().clone()
+    }
+
+    fn shard_id(&self) -> ShardId {
+        self.shard_id.unwrap()
+    }
+
+    /// Define 100% congestion limit in gas.
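+    ///
+    /// # Example (illustrative sketch, mirroring how `main.rs` configures the
+    /// strategy)
+    ///
+    /// ```ignore
+    /// let strategy = NepStrategy::default()
+    ///     .with_gas_limits(10 * PGAS, 1 * PGAS)
+    ///     .with_memory_limits(ByteSize::mb(450), ByteSize::mb(50));
+    /// ```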
+    pub fn with_gas_limits(mut self, incoming: GGas, outgoing: GGas) -> Self {
+        self.max_incoming_gas = incoming;
+        self.max_outgoing_gas = outgoing;
+        self
+    }
+
+    /// Define 100% congestion limit in bytes.
+    pub fn with_memory_limits(
+        mut self,
+        incoming: bytesize::ByteSize,
+        outgoing: bytesize::ByteSize,
+    ) -> Self {
+        self.max_incoming_congestion_memory = incoming.as_u64();
+        self.max_outgoing_congestion_memory = outgoing.as_u64();
+        self
+    }
+
+    /// Gas spent on new transactions.
+    pub fn with_tx_gas_limit_range(mut self, min: GGas, max: GGas) -> Self {
+        self.min_tx_gas = min;
+        self.max_tx_gas = max;
+        self
+    }
+
+    /// Gas allowance to send to other shards.
+    pub fn with_send_gas_limit_range(mut self, min: GGas, max: GGas) -> Self {
+        self.min_send_limit = min;
+        self.max_send_limit = max;
+        self
+    }
+
+    /// At how much % congestion the global stop should kick in.
+    pub fn with_global_stop_limit(mut self, congestion_level: f64) -> Self {
+        self.global_outgoing_congestion_limit = congestion_level;
+        self
+    }
+}
+
+fn mix(x: u64, y: u64, a: f64) -> u64 {
+    assert!(0.0 <= a);
+    assert!(a <= 1.0);
+    let x = x as f64;
+    let y = y as f64;
+
+    let result = x * (1.0 - a) + y * a;
+
+    result as u64
+}
diff --git a/tools/congestion-model/src/strategy/smooth_traffic_light.rs b/tools/congestion-model/src/strategy/smooth_traffic_light.rs
new file mode 100644
index 00000000000..14545456d9f
--- /dev/null
+++ b/tools/congestion-model/src/strategy/smooth_traffic_light.rs
@@ -0,0 +1,360 @@
+use std::collections::BTreeMap;
+
+use crate::model::ChunkExecutionContext;
+use crate::strategy::QueueFactory;
+use crate::{GGas, QueueId, Receipt, ShardId, TransactionId, GAS_LIMIT, PGAS, TGAS};
+
+pub struct SmoothTrafficLight {
+    pub shard_id: Option<ShardId>,
+    pub all_shards: Vec<ShardId>,
+    pub other_shards: Vec<ShardId>,
+
+    // The queues for receipts going to other shards.
+    pub outgoing_buffer: BTreeMap<ShardId, QueueId>,
+
+    // How much gas are we allowed to send to other shards.
+    pub outgoing_gas_allowance: BTreeMap<ShardId, GGas>,
+
+    // new tx acceptance
+    pub min_tx_gas: GGas,
+    pub max_tx_gas: GGas,
+    pub reject_tx_congestion_limit: f64,
+
+    // receipt forwarding limits
+    pub min_send_limit_amber: GGas,
+    pub max_send_limit: GGas,
+    pub red_send_limit: GGas,
+    pub smooth_slow_down: bool,
+
+    // queue limits to calculate congestion level
+    pub red_incoming_gas: GGas,
+    pub red_outgoing_gas: GGas,
+    pub memory_limit: u64,
+}
+
+impl Default for SmoothTrafficLight {
+    fn default() -> Self {
+        Self {
+            // new tx acceptance
+            min_tx_gas: 20 * TGAS,
+            max_tx_gas: 500 * TGAS,
+            reject_tx_congestion_limit: 0.25,
+
+            // receipt forwarding limits
+            min_send_limit_amber: 1 * PGAS,
+            max_send_limit: 300 * PGAS,
+            red_send_limit: 1 * PGAS,
+            smooth_slow_down: true,
+
+            // queue limits to calculate congestion level
+            red_incoming_gas: 20 * PGAS,
+            red_outgoing_gas: 2 * PGAS,
+            memory_limit: bytesize::mb(1000u64),
+
+            // init fills these
+            shard_id: Default::default(),
+            all_shards: Default::default(),
+            other_shards: Default::default(),
+            outgoing_buffer: Default::default(),
+            outgoing_gas_allowance: Default::default(),
+        }
+    }
+}
+
+#[derive(Default, Clone)]
+struct CongestedShardsInfo {
+    congestion_level: f64,
+    allowed_shard: Option<ShardId>,
+}
+
+impl crate::CongestionStrategy for SmoothTrafficLight {
+    fn init(
+        &mut self,
+        id: crate::ShardId,
+        all_shards: &[crate::ShardId],
+        queue_factory: &mut dyn QueueFactory,
+    ) {
+        self.shard_id = Some(id);
+        self.all_shards = all_shards.to_vec();
+        self.other_shards = all_shards.iter().map(|s| *s).filter(|s| *s != id).collect();
+
+        for shard_id in &self.other_shards {
+            let name = format!("outgoing_receipts_{}", shard_id);
+            let queue = queue_factory.register_queue(id, &name);
+            self.outgoing_buffer.insert(*shard_id, queue);
+        }
+    }
+
+    fn compute_chunk(&mut self, ctx: &mut ChunkExecutionContext) {
+        self.init_send_limit(ctx);
+
+        self.process_outgoing_receipts(ctx);
+
+        self.process_new_transactions(ctx);
+
+        self.process_incoming_receipts(ctx);
+
+        self.update_block_info(ctx);
+    }
+}
+
+impl SmoothTrafficLight {
+    // Step 1: Compute bandwidth limits to other shards based on the congestion information
+    fn init_send_limit(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        self.outgoing_gas_allowance.clear();
+
+        for shard_id in self.other_shards.clone() {
+            let CongestedShardsInfo { congestion_level, allowed_shard } =
+                self.get_info(ctx, &shard_id);
+            let send_limit = if congestion_level < 1.0 {
+                // amber
+                mix(self.max_send_limit, self.min_send_limit_amber, congestion_level)
+            } else {
+                // red
+                if Some(self.shard_id()) == allowed_shard {
+                    self.red_send_limit
+                } else {
+                    0
+                }
+            };
+
+            self.outgoing_gas_allowance.insert(shard_id, send_limit);
+        }
+    }
+
+    // Step 2: Drain receipts in the outgoing buffer from the previous round
+    //
+    // Goes through buffered outgoing receipts and sends as many as possible up
+    // to the send limit for each shard. Updates the send limit for every sent receipt.
+    fn process_outgoing_receipts(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        for (other_shard_id, queue_id) in &self.outgoing_buffer {
+            let send_allowance = self.outgoing_gas_allowance.get_mut(other_shard_id).unwrap();
+
+            loop {
+                let Some(receipt) = ctx.queue(*queue_id).front() else {
+                    break;
+                };
+
+                if receipt.attached_gas > *send_allowance {
+                    break;
+                }
+
+                let receipt = ctx.queue(*queue_id).pop_front().unwrap();
+                *send_allowance -= receipt.attached_gas;
+
+                ctx.forward_receipt(receipt);
+            }
+        }
+    }
+
+    // Step 3: Convert all transactions to receipts included in the chunk
+    //
+    // * limit the gas for new transactions based on our own incoming congestion
+    // * filter transactions to a shard based on the receiver's congestion level
+    //
+    // The outgoing receipts are processed as in `process_outgoing_receipts`.
+    fn process_new_transactions(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        let incoming_congestion = self.incoming_gas_congestion(ctx);
+        let tx_allowance = mix(self.max_tx_gas, self.min_tx_gas, incoming_congestion);
+
+        while ctx.gas_burnt() < tx_allowance {
+            let Some(tx) = ctx.incoming_transactions().pop_front() else {
+                // no more transactions to process
+                break;
+            };
+
+            if self.get_filter_stop(ctx, tx) {
+                // reject transaction
+                continue;
+            }
+
+            let outgoing = ctx.accept_transaction(tx);
+            self.forward_or_buffer(ctx, outgoing);
+        }
+    }
+
+    // Checks if the transaction receiver is in a congested shard. If so the
+    // transaction should be rejected.
+    fn get_filter_stop(&mut self, ctx: &mut ChunkExecutionContext<'_>, tx: TransactionId) -> bool {
+        let receiver = ctx.tx_receiver(tx);
+
+        let CongestedShardsInfo { congestion_level, .. } = self.get_info(ctx, &receiver);
+        congestion_level > self.reject_tx_congestion_limit
+    }
+
+    // Step 4: Execute receipts in the order of local, delayed, incoming
+    // Step 5: Remaining local or incoming receipts are added to the end of the
+    // delayed receipts queue
+    //
+    // In the model there is no distinction between local, delayed and incoming.
+    // All of those are stored in the incoming queue so we just process that.
+    //
+    // Always process as many receipts as allowed by the GAS_LIMIT.
+    //
+    // The outgoing receipts are processed as in `process_outgoing_receipts`.
+    fn process_incoming_receipts(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        while ctx.gas_burnt() < GAS_LIMIT {
+            let Some(receipt) = ctx.incoming_receipts().pop_front() else {
+                break;
+            };
+
+            let outgoing = ctx.execute_receipt(receipt);
+            for receipt in outgoing {
+                self.forward_or_buffer(ctx, receipt);
+            }
+        }
+    }
+
+    // Step 6: Compute own congestion information for the next block
+    fn update_block_info(&mut self, ctx: &mut ChunkExecutionContext<'_>) {
+        let incoming_gas_congestion = self.incoming_gas_congestion(ctx);
+        let outgoing_gas_congestion = self.outgoing_gas_congestion(ctx);
+        let memory_congestion = self.memory_congestion(ctx);
+
+        let max_congestion =
+            incoming_gas_congestion.max(outgoing_gas_congestion).max(memory_congestion);
+        let red = max_congestion >= 1.0;
+        let info = if red {
+            CongestedShardsInfo {
+                congestion_level: 1.0,
+                allowed_shard: Some(self.round_robin_shard(ctx.block_height() as usize)),
+            }
+        } else if !self.smooth_slow_down {
+            // Initial traffic light:
+            // Usually, signal other shards to slow down based on our incoming gas congestion only.
+            // All other limits (outgoing gas, memory) are ignored until they hit a red light.
+            // The goal is to never hit red unless we must, given the tradeoff between
+            // unbounded queues and deadlocks.
+            // If we hit red, it slows down incoming traffic massively, which should bring us out of red soon.
+            CongestedShardsInfo { congestion_level: incoming_gas_congestion, allowed_shard: None }
+        } else {
+            // Simplified and smoothed.
+            // Requires a larger max memory limit but will smoothly reduce based
+            // on size, which can even lead to smaller peak buffer size.
+            CongestedShardsInfo { congestion_level: max_congestion, allowed_shard: None }
+        };
+
+        ctx.current_block_info().insert(info);
+    }
+
+    fn round_robin_shard(&mut self, seed: usize) -> ShardId {
+        let num_other_shards = self.all_shards.len() - 1;
+        let mut index = (seed + *self.shard_id.unwrap()) % num_other_shards;
+        if self.all_shards[index] == self.shard_id() {
+            index = self.all_shards.len() - 1;
+        }
+        self.all_shards[index]
+    }
+
+    fn memory_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let mut memory_consumption = ctx.incoming_receipts().size();
+        for (_, queue_id) in &self.outgoing_buffer {
+            memory_consumption += ctx.queue(*queue_id).size();
+        }
+
+        f64::clamp(memory_consumption as f64 / self.memory_limit as f64, 0.0, 1.0)
+    }
+
+    fn incoming_gas_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let gas_backlog = ctx.incoming_receipts().attached_gas() as f64;
+        f64::clamp(gas_backlog / self.red_incoming_gas as f64, 0.0, 1.0)
+    }
+
+    fn outgoing_gas_congestion(&self, ctx: &mut ChunkExecutionContext) -> f64 {
+        let mut gas_backlog = 0;
+        for (_, queue_id) in &self.outgoing_buffer {
+            gas_backlog += ctx.queue(*queue_id).attached_gas();
+        }
+
+        let gas_congestion = gas_backlog as f64 / self.red_outgoing_gas as f64;
+        f64::clamp(gas_congestion, 0.0, 1.0)
+    }
+
+    // Forward or buffer a receipt.
+    // Local receipts are always forwarded.
+    fn forward_or_buffer(&mut self, ctx: &mut ChunkExecutionContext<'_>, receipt: Receipt) {
+        let shard_id = receipt.receiver;
+
+        // If we are the receiver just forward the receipt.
+        if shard_id == self.shard_id() {
+            ctx.forward_receipt(receipt);
+            return;
+        }
+
+        let send_limit = self.outgoing_gas_allowance.get_mut(&shard_id).unwrap();
+        if receipt.attached_gas > *send_limit {
+            ctx.queue(self.outgoing_buffer[&shard_id]).push_back(receipt);
+            return;
+        }
+
+        *send_limit -= receipt.attached_gas;
+        ctx.forward_receipt(receipt);
+    }
+
+    fn get_info(
+        &mut self,
+        ctx: &mut ChunkExecutionContext<'_>,
+        shard_id: &ShardId,
+    ) -> CongestedShardsInfo {
+        let Some(info) = ctx.prev_block_info().get(&shard_id) else {
+            // If there is no info assume there is no congestion.
+            return CongestedShardsInfo::default();
+        };
+        info.get::<CongestedShardsInfo>().unwrap().clone()
+    }
+
+    fn shard_id(&self) -> ShardId {
+        self.shard_id.unwrap()
+    }
+
+    /// Define 100% congestion limit in gas.
+    pub fn with_gas_limits(mut self, incoming: GGas, outgoing: GGas) -> Self {
+        self.red_incoming_gas = incoming;
+        self.red_outgoing_gas = outgoing;
+        self
+    }
+
+    /// Define 100% congestion limit in bytes.
+    pub fn with_memory_limit(mut self, limit: bytesize::ByteSize) -> Self {
+        self.memory_limit = limit.as_u64();
+        self
+    }
+
+    /// Define at what congestion level new transactions to a shard must be rejected.
+    pub fn with_tx_reject_threshold(mut self, threshold: f64) -> Self {
+        self.reject_tx_congestion_limit = threshold;
+        self
+    }
+
+    /// Set to false to use a less smooth strategy for slowing down, only
+    /// looking at memory and outgoing congestion once it is at the threshold.
+    /// This can give higher utilization but will lead to larger buffers.
+    pub fn with_smooth_slow_down(mut self, yes: bool) -> Self {
+        self.smooth_slow_down = yes;
+        self
+    }
+
+    /// Gas spent on new transactions.
+    pub fn with_tx_gas_limit_range(mut self, min: GGas, max: GGas) -> Self {
+        self.min_tx_gas = min;
+        self.max_tx_gas = max;
+        self
+    }
+
+    /// Gas allowance to send to other shards.
+    pub fn with_send_gas_limit_range(mut self, min: GGas, max: GGas) -> Self {
+        self.min_send_limit_amber = min;
+        self.max_send_limit = max;
+        self
+    }
+}
+
+fn mix(x: u64, y: u64, a: f64) -> u64 {
+    assert!(0.0 <= a);
+    assert!(a <= 1.0);
+    let x = x as f64;
+    let y = y as f64;
+
+    let result = x * (1.0 - a) + y * a;
+
+    result as u64
+}
diff --git a/tools/congestion-model/src/workload/all_for_one.rs b/tools/congestion-model/src/workload/all_for_one.rs
index d1e8c079601..1e7b07faa7e 100644
--- a/tools/congestion-model/src/workload/all_for_one.rs
+++ b/tools/congestion-model/src/workload/all_for_one.rs
@@ -140,12 +140,30 @@ impl AllForOneProducer {
         }
     }
 
-    pub fn one_hop_only() -> Self {
+    pub fn new(enable_one_hop: bool, enable_two_hops: bool, enable_three_hops: bool) -> Self {
+        Self { enable_one_hop, enable_two_hops, enable_three_hops, ..Default::default() }
+    }
+
+    /// Approximates the workload of game.hot.tg
+    pub fn hot_tg() -> Self {
         Self {
-            enable_one_hop: true,
-            enable_two_hops: false,
+            // Usually, workloads are function calls through a relayer.
+            // Therefore, use 2-hop requests only.
+            enable_one_hop: false,
+            enable_two_hops: true,
             enable_three_hops: false,
-            ..Default::default()
+            // The receipts are small, exact values should not matter.
+            receipt_size: 1024,
+            // Gas numbers based on reduced fn base gas costs and what traffic
+            // I could find on chain.
+            attached_gas: 30 * TGAS,
+            light_execution_gas: TGAS / 2,
+            last_execution_gas: 3 * TGAS,
+            conversion_gas: TGAS / 2,
+            // just send plenty
+            messages_per_round: 3000,
+            // empty iterator overwritten in init
+            round_robin_shards: Box::new(std::iter::empty()),
         }
     }
 }
diff --git a/tools/congestion-model/src/workload/fairness_benchmark.rs b/tools/congestion-model/src/workload/fairness_benchmark.rs
new file mode 100644
index 00000000000..e8f552e9b94
--- /dev/null
+++ b/tools/congestion-model/src/workload/fairness_benchmark.rs
@@ -0,0 +1,84 @@
+use crate::{GGas, ReceiptDefinition, ShardId, TransactionBuilder, GAS_LIMIT, TGAS};
+
+use super::Producer;
+
+/// Transaction producer that tests fairness for shards.
+///
+/// One shard has to execute receipts from all others. But it is also trying to
+/// send new transactions to all others.
+///
+/// A fair strategy should allow the busy shard to send at least some work to
+/// the other shards, since those are not above capacity. Thus, by looking at
+/// utilization, we can get a measure of fairness between shards.
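+///
+/// (Interpretation note, not part of the benchmark itself: when comparing
+/// strategies on this workload, the interesting signal is the utilization of
+/// the non-busy shards, e.g. BURNT GAS PER CHUNK and the TX delay percentiles
+/// in the summary table. A strategy that simply stops the busy shard from
+/// accepting anything can look good on queue lengths while scoring poorly on
+/// fairness.)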
+pub struct FairnessBenchmarkProducer {
+    pub receipt_size: u64,
+    pub attached_gas: GGas,
+    pub execution_gas: GGas,
+    pub conversion_gas: GGas,
+}
+
+impl Producer for FairnessBenchmarkProducer {
+    fn init(&mut self, _shards: &[ShardId]) {}
+
+    fn produce_transactions(
+        &mut self,
+        _round: crate::Round,
+        shards: &[ShardId],
+        tx_factory: &mut dyn FnMut(ShardId) -> TransactionBuilder,
+    ) -> Vec<TransactionBuilder> {
+        let mut out = vec![];
+
+        let busy_shard = shards[0];
+        let other_shards = &shards[1..];
+
+        let mut gas_to_busy = 0;
+        let mut gas_to_non_busy = 0;
+
+        // Send from all to the busy shard until it receives at least 4 times
+        // what it can handle
+        while gas_to_busy < GAS_LIMIT * 4 {
+            for &other_shard in other_shards {
+                let mut tx_to_busy_shard = tx_factory(other_shard);
+                self.produce_tx(busy_shard, &mut tx_to_busy_shard);
+                out.push(tx_to_busy_shard);
+                gas_to_busy += self.execution_gas;
+            }
+        }
+
+        // Send from the busy shard to all other shards, using around 90% of
+        // their capacity.
+        while gas_to_non_busy < GAS_LIMIT {
+            for &other_shard in other_shards {
+                let mut tx_from_busy_shard = tx_factory(busy_shard);
+                self.produce_tx(other_shard, &mut tx_from_busy_shard);
+                out.push(tx_from_busy_shard);
+            }
+            gas_to_non_busy += self.execution_gas;
+        }
+        out
+    }
+}
+
+impl FairnessBenchmarkProducer {
+    fn produce_tx(&self, receiver: ShardId, tx: &mut TransactionBuilder) {
+        let receipt = ReceiptDefinition {
+            receiver,
+            size: self.receipt_size,
+            attached_gas: self.attached_gas,
+            execution_gas: self.execution_gas,
+        };
+        tx.add_first_receipt(receipt, self.conversion_gas);
+        // no refund for this workload
+    }
+}
+
+impl Default for FairnessBenchmarkProducer {
+    fn default() -> Self {
+        Self {
+            receipt_size: 10_000,
+            attached_gas: 200 * TGAS,
+            execution_gas: 100 * TGAS,
+            conversion_gas: 1 * TGAS,
+        }
+    }
+}
diff --git a/tools/congestion-model/src/workload/mod.rs b/tools/congestion-model/src/workload/mod.rs
index 756076bfa4b..d10c2ace618 100644
--- a/tools/congestion-model/src/workload/mod.rs
+++ b/tools/congestion-model/src/workload/mod.rs
@@ -1,11 +1,13 @@
 mod all_for_one;
 mod balanced;
+mod fairness_benchmark;
 mod linear_imbalance;
 mod transaction_builder;
 mod utils;
 
 pub use all_for_one::AllForOneProducer;
 pub use balanced::BalancedProducer;
+pub use fairness_benchmark::FairnessBenchmarkProducer;
 pub use linear_imbalance::LinearImbalanceProducer;
 pub use transaction_builder::{ReceiptDefinition, ReceiptId, TransactionBuilder};
diff --git a/tools/database/src/commands.rs b/tools/database/src/commands.rs
index f6a0668c62a..0dd9313ec6b 100644
--- a/tools/database/src/commands.rs
+++ b/tools/database/src/commands.rs
@@ -9,6 +9,7 @@ use crate::make_snapshot::MakeSnapshotCommand;
 use crate::memtrie::LoadMemTrieCommand;
 use crate::run_migrations::RunMigrationsCommand;
 use crate::state_perf::StatePerfCommand;
+use crate::write_to_db::WriteCryptoHashCommand;
 use clap::Parser;
 use std::path::PathBuf;
 
@@ -48,6 +49,8 @@ enum SubCommand {
     /// Loads an in-memory trie for research purposes.
     LoadMemTrie(LoadMemTrieCommand),
 
+    /// Write CryptoHash to DB
+    WriteCryptoHash(WriteCryptoHashCommand),
     /// Outputs stats that are needed to analise high load
     /// for a block range and account.
HighLoadStats(HighLoadStatsCommand), @@ -81,6 +84,7 @@ impl DatabaseCommand { .unwrap_or_else(|e| panic!("Error loading config: {:#}", e)); cmd.run(near_config, home) } + SubCommand::WriteCryptoHash(cmd) => cmd.run(home), SubCommand::HighLoadStats(cmd) => cmd.run(home), SubCommand::AnalyzeDelayedReceipt(cmd) => cmd.run(home), } diff --git a/tools/database/src/lib.rs b/tools/database/src/lib.rs index 1c2717a01ba..9d761d16d67 100644 --- a/tools/database/src/lib.rs +++ b/tools/database/src/lib.rs @@ -12,3 +12,4 @@ mod memtrie; mod run_migrations; mod state_perf; mod utils; +mod write_to_db; diff --git a/tools/database/src/write_to_db.rs b/tools/database/src/write_to_db.rs new file mode 100644 index 00000000000..44cd6ebe5a7 --- /dev/null +++ b/tools/database/src/write_to_db.rs @@ -0,0 +1,56 @@ +use near_store::{DBCol, NodeStorage}; +use std::path::Path; + +#[derive(clap::Subcommand)] +enum BlockMiscKeySelector { + StateSnapshot, +} + +#[derive(clap::Subcommand)] +enum ColumnSelector { + BlockMisc { + #[clap(subcommand)] + key: BlockMiscKeySelector, + }, +} + +#[derive(clap::Args)] +pub(crate) struct WriteCryptoHashCommand { + #[clap(long)] + hash: near_primitives::hash::CryptoHash, + #[clap(subcommand)] + column: ColumnSelector, +} + +impl WriteCryptoHashCommand { + pub(crate) fn run(&self, home_dir: &Path) -> anyhow::Result<()> { + let near_config = nearcore::config::load_config( + &home_dir, + near_chain_configs::GenesisValidationMode::UnsafeFast, + )?; + let opener = NodeStorage::opener( + home_dir, + near_config.config.archive, + &near_config.config.store, + near_config.config.cold_store.as_ref(), + ); + + let storage = opener.open()?; + let store = storage.get_hot_store(); + let mut store_update = store.store_update(); + + match &self.column { + ColumnSelector::BlockMisc { key } => match key { + BlockMiscKeySelector::StateSnapshot => { + store_update.set_ser( + DBCol::BlockMisc, + near_store::STATE_SNAPSHOT_KEY, + &self.hash, + )?; + } + }, + } + + Ok(store_update.commit()?) 
+ } +} diff --git a/tools/debug-ui/package.json b/tools/debug-ui/package.json index 80a0ddac917..572cac4937c 100644 --- a/tools/debug-ui/package.json +++ b/tools/debug-ui/package.json @@ -9,7 +9,7 @@ "@types/react-dom": "^18.2.18", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-query": "^4.0.0", + "@tanstack/react-query": "^4.0.0", "react-router-dom": "^6.21.1", "react-scripts": "^5.0.1", "react-tooltip": "^5.22.0", diff --git a/tools/debug-ui/src/BlocksView.tsx b/tools/debug-ui/src/BlocksView.tsx index f6da3292c93..dad5014802a 100644 --- a/tools/debug-ui/src/BlocksView.tsx +++ b/tools/debug-ui/src/BlocksView.tsx @@ -1,5 +1,5 @@ import './BlocksView.scss'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchChainProcessingStatus, BlockProcessingStatus, ChunkProcessingStatus } from './api'; type BlocksViewProps = { diff --git a/tools/debug-ui/src/ChainInfoSummaryView.tsx b/tools/debug-ui/src/ChainInfoSummaryView.tsx index be002ce521a..c3205dac9a7 100644 --- a/tools/debug-ui/src/ChainInfoSummaryView.tsx +++ b/tools/debug-ui/src/ChainInfoSummaryView.tsx @@ -1,6 +1,6 @@ import './ChainInfoSummaryView.scss'; import { useMemo } from 'react'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchChainProcessingStatus, fetchFullStatus } from './api'; type ChainInfoSummaryViewProps = { diff --git a/tools/debug-ui/src/ClusterNodeView.tsx b/tools/debug-ui/src/ClusterNodeView.tsx index 194ae5df7b7..3e4edd76be3 100644 --- a/tools/debug-ui/src/ClusterNodeView.tsx +++ b/tools/debug-ui/src/ClusterNodeView.tsx @@ -1,5 +1,5 @@ import { useEffect } from 'react'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { SyncStatusResponse, TrackedShardsResponse, diff --git a/tools/debug-ui/src/ConnectionStorageView.tsx b/tools/debug-ui/src/ConnectionStorageView.tsx index 4ca9f875d17..61639c6aff9 100644 --- a/tools/debug-ui/src/ConnectionStorageView.tsx +++ b/tools/debug-ui/src/ConnectionStorageView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { toHumanTime } from './utils'; import { fetchRecentOutboundConnections } from './api'; import './ConnectionStorageView.scss'; diff --git a/tools/debug-ui/src/CurrentPeersView.tsx b/tools/debug-ui/src/CurrentPeersView.tsx index 0d81eb725dc..11fd2c9f62a 100644 --- a/tools/debug-ui/src/CurrentPeersView.tsx +++ b/tools/debug-ui/src/CurrentPeersView.tsx @@ -1,5 +1,5 @@ import { MouseEvent, useCallback, useMemo, useState } from 'react'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { PeerInfoView, fetchEpochInfo, fetchFullStatus } from './api'; import { addDebugPortLink, formatDurationInMillis, formatTraffic } from './utils'; import './CurrentPeersView.scss'; diff --git a/tools/debug-ui/src/EpochShardsView.tsx b/tools/debug-ui/src/EpochShardsView.tsx index 544d4030815..ab88837e066 100644 --- a/tools/debug-ui/src/EpochShardsView.tsx +++ b/tools/debug-ui/src/EpochShardsView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchEpochInfo } from './api'; import './EpochShardsView.scss'; diff --git a/tools/debug-ui/src/EpochValidatorsView.tsx b/tools/debug-ui/src/EpochValidatorsView.tsx index 6790d866e7d..ae2ffc951a3 100644 --- a/tools/debug-ui/src/EpochValidatorsView.tsx +++ b/tools/debug-ui/src/EpochValidatorsView.tsx @@ -1,5 +1,5 
@@ import { useId } from 'react'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { Tooltip } from 'react-tooltip'; import { ValidatorKickoutReason, fetchEpochInfo } from './api'; import './EpochValidatorsView.scss'; diff --git a/tools/debug-ui/src/FloatingChunksView.tsx b/tools/debug-ui/src/FloatingChunksView.tsx index de1b163294d..b335c3d4f1d 100644 --- a/tools/debug-ui/src/FloatingChunksView.tsx +++ b/tools/debug-ui/src/FloatingChunksView.tsx @@ -1,5 +1,5 @@ import './FloatingChunksView.scss'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchChainProcessingStatus } from './api'; type FloatingChunksViewProps = { diff --git a/tools/debug-ui/src/HeaderBar.tsx b/tools/debug-ui/src/HeaderBar.tsx index afe46b0eb48..0c8b367dee4 100644 --- a/tools/debug-ui/src/HeaderBar.tsx +++ b/tools/debug-ui/src/HeaderBar.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchBasicStatus } from './api'; import './HeaderBar.scss'; diff --git a/tools/debug-ui/src/LatestBlocksView.tsx b/tools/debug-ui/src/LatestBlocksView.tsx index aeea9bfe5ee..dec6451ed2f 100644 --- a/tools/debug-ui/src/LatestBlocksView.tsx +++ b/tools/debug-ui/src/LatestBlocksView.tsx @@ -1,5 +1,5 @@ import { Fragment, ReactElement, useCallback, useMemo, useState } from 'react'; -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import Xarrow, { Xwrapper, useXarrow } from 'react-xarrows'; import { DebugBlockStatus, MissedHeightInfo, fetchBlockStatus, fetchFullStatus } from './api'; import './LatestBlocksView.scss'; diff --git a/tools/debug-ui/src/PeerStorageView.tsx b/tools/debug-ui/src/PeerStorageView.tsx index 00bfd44ab90..9f8eaafcb62 100644 --- a/tools/debug-ui/src/PeerStorageView.tsx +++ b/tools/debug-ui/src/PeerStorageView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { toHumanTime } from './utils'; import { fetchPeerStore } from './api'; import './PeerStorageView.scss'; diff --git a/tools/debug-ui/src/RecentEpochsView.tsx b/tools/debug-ui/src/RecentEpochsView.tsx index b70fa21eea6..992f0a5838b 100644 --- a/tools/debug-ui/src/RecentEpochsView.tsx +++ b/tools/debug-ui/src/RecentEpochsView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { fetchEpochInfo, fetchFullStatus } from './api'; import { formatDurationInMillis } from './utils'; import './RecentEpochsView.scss'; diff --git a/tools/debug-ui/src/RoutingTableView.tsx b/tools/debug-ui/src/RoutingTableView.tsx index 1554f391369..1620259cc9a 100644 --- a/tools/debug-ui/src/RoutingTableView.tsx +++ b/tools/debug-ui/src/RoutingTableView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { toHumanTime, formatDurationInMillis } from './utils'; import { fetchRoutingTable } from './api'; import './RoutingTableView.scss'; diff --git a/tools/debug-ui/src/SnapshotHostsView.tsx b/tools/debug-ui/src/SnapshotHostsView.tsx index c647c75bda6..85df4c20462 100644 --- a/tools/debug-ui/src/SnapshotHostsView.tsx +++ b/tools/debug-ui/src/SnapshotHostsView.tsx @@ -1,4 +1,4 @@ -import { useQuery } from 'react-query'; +import { useQuery } from '@tanstack/react-query'; import { toHumanTime } from './utils'; import { fetchSnapshotHosts } from './api'; import './SnapshotHostsView.scss'; diff 
--git a/tools/debug-ui/src/Tier1View.tsx b/tools/debug-ui/src/Tier1View.tsx
index 96b8a70ede2..5c940919fea 100644
--- a/tools/debug-ui/src/Tier1View.tsx
+++ b/tools/debug-ui/src/Tier1View.tsx
@@ -1,5 +1,5 @@
 import { MouseEvent, useCallback, useState } from 'react';
-import { useQuery } from 'react-query';
+import { useQuery } from '@tanstack/react-query';
 import { PeerAddr, fetchFullStatus } from './api';
 import { addDebugPortLink, formatDurationInMillis, formatTraffic } from './utils';
 import './Tier1View.scss';
diff --git a/tools/debug-ui/src/index.tsx b/tools/debug-ui/src/index.tsx
index 2508b48ac8a..7c5618a9f8c 100644
--- a/tools/debug-ui/src/index.tsx
+++ b/tools/debug-ui/src/index.tsx
@@ -4,7 +4,7 @@ import './index.css';
 import 'react-tooltip/dist/react-tooltip.css';
 import '@patternfly/react-core/dist/styles/base.css';
 import { createBrowserRouter, RouterProvider } from 'react-router-dom';
-import { QueryClient, QueryClientProvider } from 'react-query';
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
 import { App } from './App';
 import { LogVisualizer } from './log_visualizer/LogVisualizer';
 import { LandingPage } from './LandingPage';
diff --git a/tools/flat-storage/src/commands.rs b/tools/flat-storage/src/commands.rs
index 383f88bab4d..d268907160c 100644
--- a/tools/flat-storage/src/commands.rs
+++ b/tools/flat-storage/src/commands.rs
@@ -6,14 +6,16 @@ use near_chain::types::RuntimeAdapter;
 use near_chain::{ChainStore, ChainStoreAccess};
 use near_chain_configs::GenesisValidationMode;
 use near_epoch_manager::{EpochManager, EpochManagerAdapter, EpochManagerHandle};
-use near_primitives::shard_layout::ShardVersion;
+use near_primitives::shard_layout::{account_id_to_shard_id, ShardVersion};
+use near_primitives::state::FlatStateValue;
 use near_primitives::types::{BlockHeight, ShardId};
 use near_store::flat::{
-    inline_flat_state_values, store_helper, FlatStateDelta, FlatStateDeltaMetadata,
-    FlatStorageManager, FlatStorageStatus,
+    inline_flat_state_values, store_helper, FlatStateChanges, FlatStateDelta,
+    FlatStateDeltaMetadata, FlatStorageManager, FlatStorageStatus,
 };
 use near_store::{DBCol, Mode, NodeStorage, ShardUId, Store, StoreOpener};
 use nearcore::{load_config, NearConfig, NightshadeRuntime, NightshadeRuntimeExt};
+use std::collections::{HashMap, HashSet};
 use std::sync::atomic::AtomicBool;
 use std::{path::PathBuf, sync::Arc, time::Duration};
 use tqdm::tqdm;
@@ -102,14 +104,30 @@ pub struct MigrateValueInliningCmd {
     batch_size: usize,
 }
 
+#[derive(Parser)]
+pub enum MoveFlatHeadMode {
+    /// Moves head forward to specific height.
+    Forward {
+        #[clap(long)]
+        new_flat_head_height: BlockHeight,
+    },
+    /// Moves head back by specific number of blocks.
+    /// Note: it doesn't record deltas on the way and should be used
+    /// only for replaying chain forward.
+    Back {
+        #[clap(long)]
+        blocks: usize,
+    },
+}
+
 #[derive(Parser)]
 pub struct MoveFlatHeadCmd {
     #[clap(long)]
     shard_id: ShardId,
     #[clap(long)]
     version: ShardVersion,
-    #[clap(long)]
-    new_flat_head_height: BlockHeight,
+    #[clap(subcommand)]
+    mode: MoveFlatHeadMode,
 }
 
 fn print_delta(store: &Store, shard_uid: ShardUId, metadata: FlatStateDeltaMetadata) {
@@ -400,6 +418,181 @@ impl FlatStorageCommand {
         Ok(())
     }
 
+    // This is a hack needed to find all updated keys which are not recorded
+    // in `DBCol::StateChanges`. It collects the keys whose values differ
+    // between the flat storage accessible through `store` and the given trie.
+    // TODO(1.40): remove after removal of feature `serialize_all_state_changes`
+    // reaches mainnet.
+    fn find_updated_missing_items(
+        &self,
+        shard_uid: ShardUId,
+        store: &Store,
+        trie: near_store::Trie,
+    ) -> anyhow::Result<Vec<(Vec<u8>, Option<FlatStateValue>)>> {
+        let missing_keys_left_boundary = &[near_primitives::trie_key::col::RECEIVED_DATA];
+        let missing_keys_right_boundary =
+            &[near_primitives::trie_key::col::DELAYED_RECEIPT_OR_INDICES + 1];
+
+        let mut prev_iter = trie.iter()?;
+        let nibbles_left_boundary: Vec<_> =
+            near_store::NibbleSlice::new(missing_keys_left_boundary).iter().collect();
+        let nibbles_right_boundary: Vec<_> =
+            near_store::NibbleSlice::new(missing_keys_right_boundary).iter().collect();
+        let prev_missing_items: HashMap<Vec<u8>, near_primitives::state::FlatStateValue> =
+            HashMap::from_iter(
+                prev_iter
+                    .get_trie_items(&nibbles_left_boundary, &nibbles_right_boundary)?
+                    .into_iter()
+                    .map(|(key, value)| {
+                        (
+                            key,
+                            near_primitives::state::FlatStateValue::Ref(
+                                near_primitives::state::ValueRef::new(&value),
+                            ),
+                        )
+                    }),
+            );
+
+        let iter = store_helper::iter_flat_state_entries(
+            shard_uid,
+            &store,
+            Some(missing_keys_left_boundary),
+            Some(missing_keys_right_boundary),
+        );
+        let missing_items: HashMap<Vec<u8>, near_primitives::state::FlatStateValue> =
+            HashMap::from_iter(iter.map(|it| {
+                let (key, value) = it.unwrap();
+                (key, near_primitives::state::FlatStateValue::Ref(value.to_value_ref()))
+            }));
+
+        let missing_keys: HashSet<_> =
+            prev_missing_items.keys().chain(missing_items.keys()).collect();
+        let mut result = vec![];
+        for key in missing_keys {
+            let prev_value = prev_missing_items.get(key);
+            let value = missing_items.get(key);
+            if prev_value != value {
+                result.push((key.to_vec(), prev_value.cloned()));
+            }
+        }
+        Ok(result)
+    }
+
+    fn move_flat_head_back(
+        &self,
+        epoch_manager: &dyn EpochManagerAdapter,
+        runtime: &dyn RuntimeAdapter,
+        chain_store: ChainStore,
+        mut shard_uid: ShardUId,
+        blocks: usize,
+    ) -> anyhow::Result<()> {
+        let store = chain_store.store();
+        let flat_head = match store_helper::get_flat_storage_status(&store, shard_uid) {
+            Ok(FlatStorageStatus::Ready(ready_status)) => ready_status.flat_head,
+            status => {
+                panic!("invalid flat storage status for shard {shard_uid:?}: {status:?}")
+            }
+        };
+        let mut height = flat_head.height;
+        let shard_id = shard_uid.shard_id();
+
+        for _ in 0..blocks {
+            let block_hash = chain_store.get_block_hash_by_height(height)?;
+            let block = chain_store.get_block(&block_hash)?;
+            let header = block.header();
+            let state_root = block.chunks().get(shard_id as usize).unwrap().prev_state_root();
+            let epoch_id = header.epoch_id();
+            let prev_hash = header.prev_hash();
+            let prev_header = chain_store.get_block_header(&prev_hash)?;
+            let prev_prev_hash = *prev_header.prev_hash();
+            let prev_height = prev_header.height();
+
+            let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?;
+            shard_uid = epoch_manager.shard_id_to_uid(shard_id, epoch_id)?;
+
+            let trie =
+                runtime.get_trie_for_shard(shard_uid.shard_id(), &block_hash, state_root, false)?;
+
+            // Find all items which were changed after applying block `block_hash`
+            // and add them to delta.
+            // This is done by iterating over `DBCol::StateChanges` corresponding
+            // to the given shard.
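+            // Keys in `DBCol::StateChanges` are prefixed with the 32-byte
+            // block hash, so `iter_prefix(.., &block_hash.0)` scans exactly
+            // this block's changes and `&key[32..]` recovers the raw trie key
+            // that the shard filter below is applied to.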
+            let mut prev_delta = FlatStateChanges::default();
+            for item in store.iter_prefix(DBCol::StateChanges, &block_hash.0) {
+                let (key, _) = item.unwrap();
+                let maybe_trie_key = &key[32..];
+                let maybe_account_id =
+                    near_primitives::trie_key::trie_key_parsers::parse_account_id_from_raw_key(
+                        maybe_trie_key,
+                    )?;
+                let maybe_trie_key = match maybe_account_id {
+                    Some(account_id) => {
+                        let account_shard_id = account_id_to_shard_id(&account_id, &shard_layout);
+                        if shard_id == account_shard_id {
+                            Some(maybe_trie_key)
+                        } else {
+                            None
+                        }
+                    }
+                    None => {
+                        assert!(maybe_trie_key.len() >= 8);
+                        let (trie_key, shard_uid_raw) =
+                            maybe_trie_key.split_at(maybe_trie_key.len() - 8);
+                        if shard_uid.to_bytes() == shard_uid_raw {
+                            Some(trie_key)
+                        } else {
+                            None
+                        }
+                    }
+                };
+
+                if let Some(trie_key) = maybe_trie_key {
+                    // Take the *previous* value for the key from the trie corresponding
+                    // to the pre-state-root for this block.
+                    let prev_value = trie
+                        .get_optimized_ref(trie_key, near_store::KeyLookupMode::Trie)?
+                        .map(|value_ref| {
+                            near_primitives::state::FlatStateValue::Ref(value_ref.into_value_ref())
+                        });
+                    let value = store_helper::get_flat_state_value(&store, shard_uid, trie_key)?
+                        .map(|val| near_primitives::state::FlatStateValue::Ref(val.to_value_ref()));
+                    if prev_value != value {
+                        prev_delta.insert(trie_key.to_vec(), prev_value);
+                    }
+                }
+            }
+
+            let missing_items = self.find_updated_missing_items(shard_uid, &store, trie)?;
+            for (key, value) in missing_items.into_iter() {
+                prev_delta.insert(key, value);
+            }
+
+            // Revert all keys whose values differ between the previous and the
+            // current block.
+            // Note that we don't write the delta to the DB, because this command
+            // is used to simulate applying chunks from past blocks, and in such
+            // simulations future deltas should not exist.
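+            // In other words: after applying `prev_delta` below, the flat state
+            // columns already reflect the state at `prev_hash`, and the status
+            // update moves the recorded flat head accordingly; replaying the
+            // chain forward from here is expected to regenerate the deltas as
+            // blocks are re-applied.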
+ let mut store_update = store.store_update(); + prev_delta.apply_to_flat_state(&mut store_update, shard_uid); + store_helper::set_flat_storage_status( + &mut store_update, + shard_uid, + FlatStorageStatus::Ready(near_store::flat::FlatStorageReadyStatus { + flat_head: near_store::flat::BlockInfo { + hash: *prev_hash, + height: prev_height, + prev_hash: prev_prev_hash, + }, + }), + ); + store_update.commit()?; + + height = prev_height; + println!("moved to {height}"); + } + Ok(()) + } + fn move_flat_head( &self, cmd: &MoveFlatHeadCmd, @@ -407,16 +600,32 @@ impl FlatStorageCommand { near_config: &NearConfig, opener: StoreOpener, ) -> anyhow::Result<()> { - let (_, _, runtime, chain_store, _) = + let (_, epoch_manager, runtime, chain_store, _) = Self::get_db(&opener, home_dir, &near_config, near_store::Mode::ReadWriteExisting); let shard_uid = ShardUId { version: cmd.version, shard_id: cmd.shard_id as u32 }; let flat_storage_manager = runtime.get_flat_storage_manager(); flat_storage_manager.create_flat_storage_for_shard(shard_uid)?; let flat_storage = flat_storage_manager.get_flat_storage_for_shard(shard_uid).unwrap(); - let header = chain_store.get_block_header_by_height(cmd.new_flat_head_height)?; - println!("Header: {header:?}"); - flat_storage.update_flat_head(header.hash(), true)?; + + match cmd.mode { + MoveFlatHeadMode::Forward { new_flat_head_height } => { + let header = chain_store.get_block_header_by_height(new_flat_head_height)?; + println!("Moving flat head for shard {shard_uid} forward to header: {header:?}"); + flat_storage.update_flat_head(header.hash(), true)?; + } + MoveFlatHeadMode::Back { blocks } => { + println!("Moving flat head for shard {shard_uid} back by {blocks} blocks"); + self.move_flat_head_back( + epoch_manager.as_ref(), + runtime.as_ref(), + chain_store, + shard_uid, + blocks, + )?; + } + } + Ok(()) } diff --git a/tools/fork-network/src/cli.rs b/tools/fork-network/src/cli.rs index 58f36b266c1..d1008a21787 100644 --- a/tools/fork-network/src/cli.rs +++ b/tools/fork-network/src/cli.rs @@ -1,6 +1,7 @@ use crate::single_shard_storage_mutator::SingleShardStorageMutator; use crate::storage_mutator::StorageMutator; use anyhow::Context; +use chrono::{DateTime, Utc}; use near_chain::types::{RuntimeAdapter, Tip}; use near_chain::{ChainStore, ChainStoreAccess}; use near_chain_configs::{Genesis, GenesisConfig, GenesisValidationMode, NEAR_BASE}; @@ -14,7 +15,6 @@ use near_primitives::account::id::AccountType; use near_primitives::account::{AccessKey, AccessKeyPermission, Account}; use near_primitives::borsh; use near_primitives::hash::CryptoHash; -use near_primitives::receipt::Receipt; use near_primitives::serialize::dec_format; use near_primitives::shard_layout::ShardUId; use near_primitives::state::FlatStateValue; @@ -107,6 +107,10 @@ struct SetValidatorsCmd { pub epoch_length: NumBlocks, #[arg(long, default_value = "-fork", allow_hyphen_values = true)] pub chain_id_suffix: String, + /// Timestamp that should be set in the genesis block. 
This is required if you want
+ /// to create a consistent forked network across many machines.
+ #[arg(long)]
+ pub genesis_time: Option<DateTime<Utc>>,
}

#[derive(clap::Parser)]
@@ -176,11 +180,13 @@ impl ForkNetworkCommand {
self.amend_access_keys(*batch_size, near_config, home_dir)?;
}
SubCommand::SetValidators(SetValidatorsCmd {
+ genesis_time,
validators,
epoch_length,
chain_id_suffix,
}) => {
self.set_validators(
+ genesis_time.unwrap_or_else(chrono::Utc::now),
validators,
*epoch_length,
chain_id_suffix,
@@ -320,7 +326,7 @@ impl ForkNetworkCommand {
let runtime =
NightshadeRuntime::from_config(home_dir, store.clone(), &near_config, epoch_manager)
.context("could not create the transaction runtime")?;
- runtime.load_mem_tries_on_startup(&all_shard_uids).unwrap();
+ runtime.get_tries().load_mem_tries_for_enabled_shards(&all_shard_uids).unwrap();
let make_storage_mutator: MakeSingleShardStorageMutatorFn =
Arc::new(move |prev_state_root| {
@@ -344,6 +350,7 @@ impl ForkNetworkCommand {
/// Creates a genesis file with the new validators.
fn set_validators(
&self,
+ genesis_time: DateTime<Utc>,
validators: &Path,
epoch_length: u64,
chain_id_suffix: &str,
@@ -379,6 +386,7 @@ impl ForkNetworkCommand {
tracing::info!("Creating a new genesis");
backup_genesis_file(home_dir, &near_config)?;
self.make_and_write_genesis(
+ genesis_time,
epoch_length,
block_height,
chain_id_suffix,
@@ -480,6 +488,8 @@ impl ForkNetworkCommand {
tracing::info!(?shard_uid);
let mut storage_mutator: SingleShardStorageMutator = make_storage_mutator(prev_state_root)?;
+ // TODO: allow mutating the state with a secret, so this can be used to prepare a public test network
+ let default_key = near_mirror::key_mapping::default_extra_key(None).public_key();
// Keeps track of accounts that have a full access key.
let mut has_full_key = HashSet::new();
// Lets us lookup large values in the `State` columns.
@@ -495,7 +505,6 @@ impl ForkNetworkCommand {
let mut contract_data_updated = 0;
let mut contract_code_updated = 0;
let mut postponed_receipts_updated = 0;
- let mut delayed_receipts_updated = 0;
let mut received_data_updated = 0;
let mut fake_block_height = block_height + 1;
for item in store_helper::iter_flat_state_entries(shard_uid, &store, None, None) {
@@ -554,23 +563,11 @@ impl ForkNetworkCommand {
contract_code_updated += 1;
}
}
- StateRecord::PostponedReceipt(receipt) => {
- // TODO(eth-implicit) Change back to is_implicit() when ETH-implicit accounts are supported.
- if receipt.predecessor_id.get_account_type()
- == AccountType::NearImplicitAccount
- || receipt.receiver_id.get_account_type()
- == AccountType::NearImplicitAccount
- {
- let new_receipt = Receipt {
- predecessor_id: map_account(&receipt.predecessor_id, None),
- receiver_id: map_account(&receipt.receiver_id, None),
- receipt_id: receipt.receipt_id,
- receipt: receipt.receipt.clone(),
- };
- storage_mutator.delete_postponed_receipt(receipt)?;
- storage_mutator.set_postponed_receipt(&new_receipt)?;
- postponed_receipts_updated += 1;
- }
+ StateRecord::PostponedReceipt(mut receipt) => {
+ storage_mutator.delete_postponed_receipt(&receipt)?;
+ near_mirror::genesis::map_receipt(&mut receipt, None, &default_key);
+ storage_mutator.set_postponed_receipt(&receipt)?;
+ postponed_receipts_updated += 1;
}
StateRecord::ReceivedData { account_id, data_id, data } => {
// TODO(eth-implicit) Change back to is_implicit() when ETH-implicit accounts are supported.
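(Reviewer note: the new `genesis_time` option is what makes a coordinated multi-machine fork reproducible. A sketch of how it might be invoked, assuming clap's default kebab-case flag naming and RFC 3339 parsing via `DateTime<Utc>`'s `FromStr` impl; the binary path, subcommand spelling, and `--validators` flag below are assumptions, not taken from this diff:)

```bash
# Hypothetical invocation: pin the same genesis timestamp on every machine.
./target/release/neard --home ~/.near fork-network set-validators \
    --validators ~/validators.json \
    --epoch-length 1000 \
    --genesis-time 2024-04-01T00:00:00Z
```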
@@ -581,24 +578,10 @@ impl ForkNetworkCommand { received_data_updated += 1; } } - StateRecord::DelayedReceipt(receipt) => { - // TODO(eth-implicit) Change back to is_implicit() when ETH-implicit accounts are supported. - if receipt.predecessor_id.get_account_type() - == AccountType::NearImplicitAccount - || receipt.receiver_id.get_account_type() - == AccountType::NearImplicitAccount - { - let new_receipt = Receipt { - predecessor_id: map_account(&receipt.predecessor_id, None), - receiver_id: map_account(&receipt.receiver_id, None), - receipt_id: receipt.receipt_id, - receipt: receipt.receipt, - }; - storage_mutator.delete_delayed_receipt(index_delayed_receipt)?; - storage_mutator - .set_delayed_receipt(index_delayed_receipt, &new_receipt)?; - delayed_receipts_updated += 1; - } + StateRecord::DelayedReceipt(mut receipt) => { + storage_mutator.delete_delayed_receipt(index_delayed_receipt)?; + near_mirror::genesis::map_receipt(&mut receipt, None, &default_key); + storage_mutator.set_delayed_receipt(index_delayed_receipt, &receipt)?; index_delayed_receipt += 1; } } @@ -616,7 +599,7 @@ impl ForkNetworkCommand { + contract_data_updated + contract_code_updated + postponed_receipts_updated - + delayed_receipts_updated + + index_delayed_receipt + received_data_updated, ); let state_root = storage_mutator.commit(&shard_uid, fake_block_height)?; @@ -635,7 +618,7 @@ impl ForkNetworkCommand { contract_code_updated, contract_data_updated, postponed_receipts_updated, - delayed_receipts_updated, + delayed_receipts_updated = index_delayed_receipt, received_data_updated, num_has_full_key = has_full_key.len(), "Pass 1 done" @@ -666,7 +649,7 @@ impl ForkNetworkCommand { } storage_mutator.set_access_key( account_id, - near_mirror::key_mapping::default_extra_key(None).public_key(), + default_key.clone(), AccessKey::full_access(), )?; num_added += 1; @@ -768,6 +751,7 @@ impl ForkNetworkCommand { /// Makes a new genesis and writes it to `~/.near/genesis.json`. 
fn make_and_write_genesis(
&self,
+ genesis_time: DateTime<Utc>,
epoch_length: u64,
height: BlockHeight,
chain_id_suffix: &str,
@@ -785,7 +769,7 @@ impl ForkNetworkCommand {
let new_config = GenesisConfig {
chain_id: original_config.chain_id.clone() + chain_id_suffix,
genesis_height: height,
- genesis_time: chrono::Utc::now(),
+ genesis_time,
epoch_length,
num_block_producer_seats: epoch_config.num_block_producer_seats,
num_block_producer_seats_per_shard: epoch_config.num_block_producer_seats_per_shard,
diff --git a/tools/fork-network/src/single_shard_storage_mutator.rs b/tools/fork-network/src/single_shard_storage_mutator.rs
index 9c2368b3237..7206dda2e91 100644
--- a/tools/fork-network/src/single_shard_storage_mutator.rs
+++ b/tools/fork-network/src/single_shard_storage_mutator.rs
@@ -101,9 +101,9 @@ impl SingleShardStorageMutator {
)
}
- pub(crate) fn delete_postponed_receipt(&mut self, receipt: Box<Receipt>) -> anyhow::Result<()> {
+ pub(crate) fn delete_postponed_receipt(&mut self, receipt: &Receipt) -> anyhow::Result<()> {
self.remove(TrieKey::PostponedReceipt {
- receiver_id: receipt.receiver_id,
+ receiver_id: receipt.receiver_id.clone(),
receipt_id: receipt.receipt_id,
})
}
diff --git a/tools/mirror/src/chain_tracker.rs b/tools/mirror/src/chain_tracker.rs
index c310052b71d..29641131bdc 100644
--- a/tools/mirror/src/chain_tracker.rs
+++ b/tools/mirror/src/chain_tracker.rs
@@ -129,7 +129,6 @@ pub(crate) enum SentBatch {
// TODO: the separation between what's in here and what's in the main file with struct TxMirror is not
// that clear and doesn't make that much sense. Should refactor
-#[derive(Default)]
pub(crate) struct TxTracker {
sent_txs: HashMap,
txs_by_signer: HashMap<(AccountId, PublicKey), BTreeSet>,
@@ -147,7 +146,7 @@ pub(crate) struct TxTracker {
nonempty_height_queued: Option<BlockHeight>,
height_popped: Option<BlockHeight>,
height_seen: Option<BlockHeight>,
- send_time: Option<Pin<Box<tokio::time::Sleep>>>,
+ send_time: Pin<Box<tokio::time::Sleep>>,
// Config value in the target chain, used to judge how long to wait before sending a new batch of txs
min_block_production_delay: Duration,
// timestamps in the target chain, used to judge how long to wait before sending a new batch of txs
@@ -169,7 +168,25 @@ impl TxTracker {
I: IntoIterator,
{
let next_heights = next_heights.into_iter().map(Clone::clone).collect();
- Self { min_block_production_delay, next_heights, stop_height, ..Default::default() }
+ Self {
+ min_block_production_delay,
+ next_heights,
+ stop_height,
+ // Wait at least 15 seconds before sending any transactions because for
+ // a few seconds after the node starts, transaction routing requests
+ // will be silently dropped by the peer manager.
+ send_time: Box::pin(tokio::time::sleep(std::time::Duration::from_secs(15))),
+ sent_txs: HashMap::new(),
+ txs_by_signer: HashMap::new(),
+ queued_blocks: VecDeque::new(),
+ updater_to_keys: HashMap::new(),
+ nonces: HashMap::new(),
+ height_queued: None,
+ nonempty_height_queued: None,
+ height_popped: None,
+ height_seen: None,
+ recent_block_timestamps: VecDeque::new(),
+ }
}
pub(crate) async fn next_heights(
@@ -441,10 +458,7 @@ impl TxTracker {
}
pub(crate) fn next_batch_time(&self) -> Instant {
- match &self.send_time {
- Some(t) => t.as_ref().deadline().into_std(),
- None => Instant::now(),
- }
+ self.send_time.as_ref().deadline().into_std()
}
pub(crate) async fn next_batch(
@@ -455,10 +469,10 @@ impl TxTracker {
// sleep until 20 milliseconds before the time we want to send transactions, and only then check for nonces
// in the target chain.
In the second or so between now and then, we might process another block
// that will set the nonces.
- if let Some(s) = &self.send_time {
- tokio::time::sleep_until(s.as_ref().deadline() - std::time::Duration::from_millis(20))
- .await;
- }
+ tokio::time::sleep_until(
+ self.send_time.as_ref().deadline() - std::time::Duration::from_millis(20),
+ )
+ .await;
let mut needed_access_keys = HashSet::new();
for c in self.queued_blocks[0].chunks.iter_mut() {
for tx in c.txs.iter_mut() {
@@ -518,9 +532,7 @@ impl TxTracker {
};
}
}
- if let Some(sleep) = &mut self.send_time {
- sleep.await;
- }
+ (&mut self.send_time).await;
Ok(self.queued_blocks.pop_front().unwrap())
}
@@ -1134,12 +1146,7 @@ impl TxTracker {
let block_delay = self
.second_longest_recent_block_delay()
.unwrap_or(self.min_block_production_delay + Duration::from_millis(100));
- match &mut self.send_time {
- Some(t) => t.as_mut().reset(tokio::time::Instant::now() + block_delay),
- None => {
- self.send_time = Some(Box::pin(tokio::time::sleep(block_delay)));
- }
- }
+ self.send_time.as_mut().reset(tokio::time::Instant::now() + block_delay);
crate::set_last_source_height(db, b.source_height)?;
let txs = b
.chunks
diff --git a/tools/mirror/src/genesis.rs b/tools/mirror/src/genesis.rs
index 3fcfbd67fba..a84d05d2eb2 100644
--- a/tools/mirror/src/genesis.rs
+++ b/tools/mirror/src/genesis.rs
@@ -1,4 +1,8 @@
+use near_crypto::PublicKey;
+use near_primitives::action::delegate::{DelegateAction, SignedDelegateAction};
+use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum};
use near_primitives::state_record::StateRecord;
+use near_primitives::transaction::{Action, AddKeyAction, DeleteAccountAction, DeleteKeyAction};
use near_primitives_core::account::id::AccountType;
use near_primitives_core::account::{AccessKey, AccessKeyPermission};
use serde::ser::{SerializeSeq, Serializer};
@@ -7,10 +11,164 @@ use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
+fn map_action(
+ action: &Action,
+ secret: Option<&[u8; crate::secret::SECRET_LEN]>,
+ default_key: &PublicKey,
+ delegate_allowed: bool,
+) -> Option<Action> {
+ match action {
+ Action::AddKey(add_key) => {
+ let public_key = crate::key_mapping::map_key(&add_key.public_key, secret).public_key();
+
+ Some(Action::AddKey(Box::new(AddKeyAction {
+ public_key,
+ access_key: add_key.access_key.clone(),
+ })))
+ }
+ Action::DeleteKey(delete_key) => {
+ let public_key =
+ crate::key_mapping::map_key(&delete_key.public_key, secret).public_key();
+
+ Some(Action::DeleteKey(Box::new(DeleteKeyAction { public_key })))
+ }
+ Action::DeleteAccount(delete_account) => {
+ let beneficiary_id =
+ crate::key_mapping::map_account(&delete_account.beneficiary_id, secret);
+ Some(Action::DeleteAccount(DeleteAccountAction { beneficiary_id }))
+ }
+ Action::Delegate(delegate) => {
+ if delegate_allowed {
+ map_delegate_action(delegate, secret, default_key)
+ } else {
+ // This should not happen, but we handle the case here defensively
+ tracing::warn!(target: "mirror", "a delegate action was contained inside another delegate action: {:?}", delegate);
+ None
+ }
+ }
+ // We don't want to mess with the set of validators in the target chain
+ Action::Stake(_) => None,
+ _ => Some(action.clone()),
+ }
+}
+
+fn map_delegate_action(
+ delegate: &SignedDelegateAction,
+ secret: Option<&[u8; crate::secret::SECRET_LEN]>,
+ default_key: &PublicKey,
+) -> Option<Action> {
+ let source_actions = delegate.delegate_action.get_actions();
+ let mut actions = Vec::with_capacity(source_actions.len());
+
+ let mut
account_created = false; + let mut full_key_added = false; + for action in source_actions.iter() { + if let Some(a) = map_action(action, secret, default_key, false) { + match &a { + Action::AddKey(add_key) => { + if add_key.access_key.permission == AccessKeyPermission::FullAccess { + full_key_added = true; + } + } + Action::CreateAccount(_) => { + account_created = true; + } + _ => {} + }; + actions.push(a.try_into().unwrap()); + } + } + if actions.is_empty() { + return None; + } + if account_created && !full_key_added { + actions.push( + Action::AddKey(Box::new(AddKeyAction { + public_key: default_key.clone(), + access_key: AccessKey::full_access(), + })) + .try_into() + .unwrap(), + ); + } + let mapped_key = crate::key_mapping::map_key(&delegate.delegate_action.public_key, secret); + let mapped_action = DelegateAction { + sender_id: crate::key_mapping::map_account(&delegate.delegate_action.sender_id, secret), + receiver_id: crate::key_mapping::map_account(&delegate.delegate_action.receiver_id, secret), + actions, + nonce: delegate.delegate_action.nonce, + max_block_height: delegate.delegate_action.max_block_height, + public_key: mapped_key.public_key(), + }; + let tx_hash = mapped_action.get_nep461_hash(); + let d = SignedDelegateAction { + delegate_action: mapped_action, + signature: mapped_key.sign(tx_hash.as_ref()), + }; + Some(Action::Delegate(Box::new(d))) +} + +// map all the account IDs and keys in this receipt and its actions, and skip any stake actions +fn map_action_receipt( + receipt: &mut ActionReceipt, + secret: Option<&[u8; crate::secret::SECRET_LEN]>, + default_key: &PublicKey, +) { + receipt.signer_id = crate::key_mapping::map_account(&receipt.signer_id, secret); + receipt.signer_public_key = + crate::key_mapping::map_key(&receipt.signer_public_key, secret).public_key(); + for receiver in receipt.output_data_receivers.iter_mut() { + receiver.receiver_id = crate::key_mapping::map_account(&receiver.receiver_id, secret); + } + + let mut actions = Vec::with_capacity(receipt.actions.len()); + let mut account_created = false; + let mut full_key_added = false; + for action in receipt.actions.iter() { + if let Some(a) = map_action(action, secret, default_key, true) { + match &a { + Action::AddKey(add_key) => { + if add_key.access_key.permission == AccessKeyPermission::FullAccess { + full_key_added = true; + } + } + Action::CreateAccount(_) => { + account_created = true; + } + _ => {} + }; + actions.push(a); + } + } + if account_created && !full_key_added { + actions.push(Action::AddKey(Box::new(AddKeyAction { + public_key: default_key.clone(), + access_key: AccessKey::full_access(), + }))); + } + receipt.actions = actions; +} + +// map any account IDs or keys referenced in the receipt +pub fn map_receipt( + receipt: &mut Receipt, + secret: Option<&[u8; crate::secret::SECRET_LEN]>, + default_key: &PublicKey, +) { + receipt.predecessor_id = crate::key_mapping::map_account(&receipt.predecessor_id, secret); + receipt.receiver_id = crate::key_mapping::map_account(&receipt.receiver_id, secret); + match &mut receipt.receipt { + ReceiptEnum::Action(r) | ReceiptEnum::PromiseYield(r) => { + map_action_receipt(r, secret, default_key); + } + _ => {} + } +} + /// Reads records, makes changes to them and writes them to a new file. /// `records_file_in` must be different from `records_file_out`. /// Writes a secret to `secret_file_out`. 
-pub fn map_records<P: AsRef<Path>>(
+pub(crate) fn map_records<P: AsRef<Path>>(
records_file_in: P,
records_file_out: P,
no_secret: bool,
@@ -30,6 +188,7 @@ pub fn map_records<P: AsRef<Path>>(
let mut has_full_key = HashSet::new();
let mut accounts = HashSet::new();
+ let default_key = crate::key_mapping::default_extra_key(secret.as_ref()).public_key();
near_chain_configs::stream_records_from_file(reader, |mut r| {
match &mut r {
StateRecord::AccessKey { account_id, public_key, access_key } => {
@@ -73,15 +232,7 @@ pub fn map_records<P: AsRef<Path>>(
records_seq.serialize_element(&r).unwrap();
}
StateRecord::PostponedReceipt(receipt) => {
- // TODO(eth-implicit) Change back to is_implicit() when ETH-implicit accounts are supported.
- if receipt.predecessor_id.get_account_type() == AccountType::NearImplicitAccount
- || receipt.receiver_id.get_account_type() == AccountType::NearImplicitAccount
- {
- receipt.predecessor_id =
- crate::key_mapping::map_account(&receipt.predecessor_id, secret.as_ref());
- receipt.receiver_id =
- crate::key_mapping::map_account(&receipt.receiver_id, secret.as_ref());
- }
+ map_receipt(receipt, secret.as_ref(), &default_key);
records_seq.serialize_element(&r).unwrap();
}
StateRecord::ReceivedData { account_id, .. } => {
@@ -92,25 +243,17 @@ pub fn map_records<P: AsRef<Path>>(
records_seq.serialize_element(&r).unwrap();
}
StateRecord::DelayedReceipt(receipt) => {
- // TODO(eth-implicit) Change back to is_implicit() when ETH-implicit accounts are supported.
- if receipt.predecessor_id.get_account_type() == AccountType::NearImplicitAccount
- || receipt.receiver_id.get_account_type() == AccountType::NearImplicitAccount
- {
- receipt.predecessor_id =
- crate::key_mapping::map_account(&receipt.predecessor_id, secret.as_ref());
- receipt.receiver_id =
- crate::key_mapping::map_account(&receipt.receiver_id, secret.as_ref());
- }
+ map_receipt(receipt, secret.as_ref(), &default_key);
records_seq.serialize_element(&r).unwrap();
}
};
})?;
- let default_key = crate::key_mapping::default_extra_key(secret.as_ref());
+
for account_id in accounts {
if !has_full_key.contains(&account_id) {
records_seq.serialize_element(&StateRecord::AccessKey {
account_id,
- public_key: default_key.public_key(),
+ public_key: default_key.clone(),
access_key: AccessKey::full_access(),
})?;
}
@@ -118,3 +261,176 @@ pub fn map_records<P: AsRef<Path>>(
records_seq.end()?;
Ok(())
}
+
+#[cfg(test)]
+mod test {
+ use near_crypto::{KeyType, SecretKey};
+ use near_primitives::account::{AccessKeyPermission, FunctionCallPermission};
+ use near_primitives::action::delegate::{DelegateAction, SignedDelegateAction};
+ use near_primitives::hash::CryptoHash;
+ use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum};
+ use near_primitives::transaction::{Action, AddKeyAction, CreateAccountAction};
+ use near_primitives_core::account::AccessKey;
+
+ #[test]
+ fn test_map_receipt() {
+ let default_key = crate::key_mapping::default_extra_key(None).public_key();
+
+ let mut receipt0 = Receipt {
+ predecessor_id: "foo.near".parse().unwrap(),
+ receiver_id: "foo.foo.near".parse().unwrap(),
+ receipt_id: CryptoHash::default(),
+ receipt: ReceiptEnum::Action(ActionReceipt {
+ signer_id: "foo.near".parse().unwrap(),
+ signer_public_key: "ed25519:He7QeRuwizNEhBioYG3u4DZ8jWXyETiyNzFD3MkTjDMf"
+ .parse()
+ .unwrap(),
+ gas_price: 100,
+ output_data_receivers: vec![],
+ input_data_ids: vec![],
+ actions: vec![
+ Action::CreateAccount(CreateAccountAction {}),
+ Action::AddKey(Box::new(AddKeyAction {
+ public_key: "ed25519:FXXrTXiKWpXj1R6r5fBvMLpstd8gPyrBq3qMByqKVzKF"
+ .parse()
+ .unwrap(),
+ access_key:
AccessKey { + nonce: 0, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: "foo.near".parse().unwrap(), + method_names: vec![String::from("do_thing")], + }), + }, + })), + ], + }), + }; + let want_receipt0 = Receipt { + predecessor_id: "foo.near".parse().unwrap(), + receiver_id: "foo.foo.near".parse().unwrap(), + receipt_id: CryptoHash::default(), + receipt: ReceiptEnum::Action(ActionReceipt { + signer_id: "foo.near".parse().unwrap(), + signer_public_key: "ed25519:6rL9HcTfinxxcVURLeQ3Y3nkietL4LQ3WxhPn51bCo4V" + .parse() + .unwrap(), + gas_price: 100, + output_data_receivers: vec![], + input_data_ids: vec![], + actions: vec![ + Action::CreateAccount(CreateAccountAction {}), + Action::AddKey(Box::new(AddKeyAction { + public_key: "ed25519:FYcGnVNM6wTcvm9b4UenJuCiiL9wDaJ3mpoebF4Go4mc" + .parse() + .unwrap(), + access_key: AccessKey { + nonce: 0, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: "foo.near".parse().unwrap(), + method_names: vec![String::from("do_thing")], + }), + }, + })), + Action::AddKey(Box::new(AddKeyAction { + public_key: default_key.clone(), + access_key: AccessKey::full_access(), + })), + ], + }), + }; + + let secret_key = SecretKey::from_random(KeyType::ED25519); + let delegate_action = DelegateAction { + sender_id: "d4156e03cb09f47117ddfde4fdcd5f3b8b087dccb364e228b8b3ed91d69054f4" + .parse() + .unwrap(), + receiver_id: "foo.near".parse().unwrap(), + nonce: 0, + max_block_height: 1234, + public_key: secret_key.public_key(), + actions: vec![Action::AddKey(Box::new(AddKeyAction { + public_key: "ed25519:Eo9W44tRMwcYcoua11yM7Xfr1DjgR4EWQFM3RU27MEX8".parse().unwrap(), + access_key: AccessKey::full_access(), + })) + .try_into() + .unwrap()], + }; + let tx_hash = delegate_action.get_nep461_hash(); + let signature = secret_key.sign(tx_hash.as_ref()); + + let mut receipt1 = Receipt { + predecessor_id: "757a45019f9a3e5bd475586c31f63d6e15d50f5366caf4643f6f69731a222cad" + .parse() + .unwrap(), + receiver_id: "d4156e03cb09f47117ddfde4fdcd5f3b8b087dccb364e228b8b3ed91d69054f4" + .parse() + .unwrap(), + receipt_id: CryptoHash::default(), + receipt: ReceiptEnum::Action(ActionReceipt { + signer_id: "757a45019f9a3e5bd475586c31f63d6e15d50f5366caf4643f6f69731a222cad" + .parse() + .unwrap(), + signer_public_key: "ed25519:He7QeRuwizNEhBioYG3u4DZ8jWXyETiyNzFD3MkTjDMf" + .parse() + .unwrap(), + gas_price: 100, + output_data_receivers: vec![], + input_data_ids: vec![], + actions: vec![Action::Delegate(Box::new(SignedDelegateAction { + delegate_action, + signature, + }))], + }), + }; + + let mapped_secret_key = crate::key_mapping::map_key(&secret_key.public_key(), None); + let delegate_action = DelegateAction { + sender_id: "799185fe8173d8adf46b0c088d57887b2550642c08aafdc20ccce67b5ad51976" + .parse() + .unwrap(), + receiver_id: "foo.near".parse().unwrap(), + nonce: 0, + max_block_height: 1234, + public_key: mapped_secret_key.public_key(), + actions: vec![Action::AddKey(Box::new(AddKeyAction { + public_key: "ed25519:4etp3kcYH2rwGdbwbLbUd1AKHMEPLKosCMSQFqYqPL6V".parse().unwrap(), + access_key: AccessKey::full_access(), + })) + .try_into() + .unwrap()], + }; + let tx_hash = delegate_action.get_nep461_hash(); + let signature = mapped_secret_key.sign(tx_hash.as_ref()); + let want_receipt1 = Receipt { + predecessor_id: "3f8c3be8929e5fa61907f13a6247e7e452b92bb7d224cf691a9aa67814eb509b" + .parse() + .unwrap(), + receiver_id: "799185fe8173d8adf46b0c088d57887b2550642c08aafdc20ccce67b5ad51976" + 
.parse()
+ .unwrap(),
+ receipt_id: CryptoHash::default(),
+ receipt: ReceiptEnum::Action(ActionReceipt {
+ signer_id: "3f8c3be8929e5fa61907f13a6247e7e452b92bb7d224cf691a9aa67814eb509b"
+ .parse()
+ .unwrap(),
+ signer_public_key: "ed25519:6rL9HcTfinxxcVURLeQ3Y3nkietL4LQ3WxhPn51bCo4V"
+ .parse()
+ .unwrap(),
+ gas_price: 100,
+ output_data_receivers: vec![],
+ input_data_ids: vec![],
+ actions: vec![Action::Delegate(Box::new(SignedDelegateAction {
+ delegate_action,
+ signature,
+ }))],
+ }),
+ };
+
+ crate::genesis::map_receipt(&mut receipt0, None, &default_key);
+ assert_eq!(receipt0, want_receipt0);
+ crate::genesis::map_receipt(&mut receipt1, None, &default_key);
+ assert_eq!(receipt1, want_receipt1);
+ }
+}
diff --git a/tools/mirror/src/lib.rs b/tools/mirror/src/lib.rs
index e9d8e5da1e5..d6c9e45691b 100644
--- a/tools/mirror/src/lib.rs
+++ b/tools/mirror/src/lib.rs
@@ -16,8 +16,8 @@ use near_o11y::WithSpanContextExt;
use near_primitives::hash::CryptoHash;
use near_primitives::receipt::{Receipt, ReceiptEnum};
use near_primitives::transaction::{
- Action, AddKeyAction, CreateAccountAction, DeleteKeyAction, SignedTransaction, StakeAction,
- Transaction,
+ Action, AddKeyAction, CreateAccountAction, DeleteAccountAction, DeleteKeyAction,
+ SignedTransaction, StakeAction, Transaction,
};
use near_primitives::types::{
AccountId, BlockHeight, BlockReference, Finality, TransactionOrReceiptId,
@@ -41,7 +41,7 @@ use tokio::sync::mpsc;
mod chain_tracker;
pub mod cli;
-mod genesis;
+pub mod genesis;
pub mod key_mapping;
mod metrics;
mod offline;
@@ -1033,6 +1033,15 @@ impl TxMirror {
account_created = true;
actions.push(action.clone());
}
+ Action::DeleteAccount(d) => {
+ actions.push(Action::DeleteAccount(DeleteAccountAction {
+ beneficiary_id: crate::key_mapping::map_account(
+ &d.beneficiary_id,
+ self.secret.as_ref(),
+ ),
+ }));
+ }
+ // TODO: handle delegate actions
_ => actions.push(action.clone()),
};
}
diff --git a/tools/state-viewer/README.md b/tools/state-viewer/README.md
index 8d44cc3ce0e..9a02cec8be6 100644
--- a/tools/state-viewer/README.md
+++ b/tools/state-viewer/README.md
@@ -15,7 +15,7 @@ Basic example:
```bash
make neard
./target/release/neard --home ~/.near/ view_state apply_range \
-  --shard-id=0 --start-index=42376889 --end_index=423770101 \
+  --shard-id=0 --start-index=42376889 --end-index=423770101 \
  --verbose-output --csv-file=./apply_range.csv
```
diff --git a/tools/state-viewer/src/apply_chain_range.rs b/tools/state-viewer/src/apply_chain_range.rs
index 40bff382f70..e4ee3e5ac58 100644
--- a/tools/state-viewer/src/apply_chain_range.rs
+++ b/tools/state-viewer/src/apply_chain_range.rs
@@ -1,3 +1,4 @@
+use crate::cli::ApplyRangeMode;
use near_chain::chain::collect_receipts_from_response;
use near_chain::migrations::check_if_block_is_first_with_chunk_of_version;
use near_chain::types::{
@@ -12,6 +13,7 @@ use near_primitives::transaction::{Action, ExecutionOutcomeWithId, ExecutionOutc
use near_primitives::trie_key::TrieKey;
use near_primitives::types::chunk_extra::ChunkExtra;
use near_primitives::types::{BlockHeight, ShardId};
+use near_store::flat::{BlockInfo, FlatStateChanges, FlatStorageStatus};
use near_store::{DBCol, Store};
use nearcore::NightshadeRuntime;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
@@ -111,6 +113,7 @@ fn maybe_add_to_csv(csv_file_mutex: &Mutex<Option<&mut File>>, s: &str) {
}
fn apply_block_from_range(
+ mode: ApplyRangeMode,
height: BlockHeight,
shard_id: ShardId,
store: Store,
@@ -322,9 +325,38 @@ fn apply_block_from_range(
),
);
progress_reporter.inc_and_report_progress(apply_result.total_gas_burnt);
+
+ if mode == ApplyRangeMode::Benchmarking {
+ // Compute delta and immediately apply to flat storage.
+ let changes =
+ FlatStateChanges::from_state_changes(apply_result.trie_changes.state_changes());
+ let delta = near_store::flat::FlatStateDelta {
+ metadata: near_store::flat::FlatStateDeltaMetadata {
+ block: BlockInfo {
+ hash: block_hash,
+ height: block.header().height(),
+ prev_hash: *block.header().prev_hash(),
+ },
+ prev_block_with_changes: None,
+ },
+ changes,
+ };
+
+ let flat_storage_manager = runtime_adapter.get_flat_storage_manager();
+ let flat_storage = flat_storage_manager.get_flat_storage_for_shard(shard_uid).unwrap();
+ let store_update = flat_storage.add_delta(delta).unwrap();
+ store_update.commit().unwrap();
+ flat_storage.update_flat_head(&block_hash, true).unwrap();
+
+ // Apply trie changes to trie node caches.
+ let mut fake_store_update = store.store_update();
+ apply_result.trie_changes.insertions_into(&mut fake_store_update);
+ apply_result.trie_changes.deletions_into(&mut fake_store_update);
+ }
}
pub fn apply_chain_range(
+ mode: ApplyRangeMode,
store: Store,
genesis: &Genesis,
start_height: Option<BlockHeight>,
@@ -335,22 +367,54 @@
verbose_output: bool,
csv_file: Option<&mut File>,
only_contracts: bool,
- sequential: bool,
use_flat_storage: bool,
) {
let parent_span = tracing::debug_span!(
target: "state_viewer",
"apply_chain_range",
+ ?mode,
?start_height,
?end_height,
%shard_id,
only_contracts,
- sequential,
use_flat_storage)
.entered();
let chain_store = ChainStore::new(store.clone(), genesis.config.genesis_height, false);
- let end_height = end_height.unwrap_or_else(|| chain_store.head().unwrap().height);
- let start_height = start_height.unwrap_or_else(|| chain_store.tail().unwrap());
+
+ let (start_height, end_height) = match mode {
+ ApplyRangeMode::Benchmarking => {
+ // Benchmarking mode requires flat storage and retrieves start and
+ // end heights from flat storage and chain.
+ assert!(use_flat_storage);
+ assert!(start_height.is_none());
+ assert!(end_height.is_none());
+
+ let chain_store = ChainStore::new(store.clone(), genesis.config.genesis_height, false);
+ let final_head = chain_store.final_head().unwrap();
+ let shard_layout = epoch_manager.get_shard_layout(&final_head.epoch_id).unwrap();
+ let shard_uid = near_primitives::shard_layout::ShardUId::from_shard_id_and_layout(
+ shard_id,
+ &shard_layout,
+ );
+ let flat_head = match near_store::flat::store_helper::get_flat_storage_status(
+ &store, shard_uid,
+ ) {
+ Ok(FlatStorageStatus::Ready(ready_status)) => ready_status.flat_head,
+ status => {
+ panic!("cannot create flat storage for shard {shard_id} with status {status:?}")
+ }
+ };
+ let flat_storage_manager = runtime_adapter.get_flat_storage_manager();
+ flat_storage_manager.create_flat_storage_for_shard(shard_uid).unwrap();
+
+ // Note that the first height to apply is the one right after the
+ // flat head.
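+ // For example, with flat_head.height == 100 and final_head.height == 105,
+ // heights 101..=105 get applied, matching the inclusive range printed below.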
+ (flat_head.height + 1, final_head.height) + } + _ => ( + start_height.unwrap_or_else(|| chain_store.tail().unwrap()), + end_height.unwrap_or_else(|| chain_store.head().unwrap().height), + ), + }; println!( "Applying chunks in the range {}..={} for shard_id {}", @@ -365,7 +429,7 @@ pub fn apply_chain_range( let progress_reporter = ProgressReporter { cnt: AtomicU64::new(0), ts: AtomicU64::new(timestamp_ms()), - all: end_height - start_height, + all: (end_height + 1).saturating_sub(start_height), skipped: AtomicU64::new(0), empty_blocks: AtomicU64::new(0), non_empty_blocks: AtomicU64::new(0), @@ -373,6 +437,7 @@ pub fn apply_chain_range( }; let process_height = |height| { apply_block_from_range( + mode, height, shard_id, store.clone(), @@ -387,26 +452,29 @@ pub fn apply_chain_range( ); }; - if sequential { - range.into_iter().for_each(|height| { - let _span = tracing::debug_span!( - target: "state_viewer", - parent: &parent_span, - "process_block_in_order", - height) - .entered(); - process_height(height) - }); - } else { - range.into_par_iter().for_each(|height| { - let _span = tracing::debug_span!( + match mode { + ApplyRangeMode::Sequential | ApplyRangeMode::Benchmarking => { + range.into_iter().for_each(|height| { + let _span = tracing::debug_span!( + target: "state_viewer", + parent: &parent_span, + "process_block_in_order", + height) + .entered(); + process_height(height) + }); + } + ApplyRangeMode::Parallel => { + range.into_par_iter().for_each(|height| { + let _span = tracing::debug_span!( target: "mock_node", parent: &parent_span, "process_block_in_parallel", height) - .entered(); - process_height(height) - }); + .entered(); + process_height(height) + }); + } } println!( @@ -465,6 +533,7 @@ mod test { use nearcore::NightshadeRuntime; use crate::apply_chain_range::apply_chain_range; + use crate::cli::ApplyRangeMode; fn setup(epoch_length: NumBlocks) -> (Store, Genesis, TestEnv) { let mut genesis = @@ -551,6 +620,7 @@ mod test { epoch_manager.clone(), ); apply_chain_range( + ApplyRangeMode::Parallel, store, &genesis, None, @@ -562,7 +632,6 @@ mod test { None, false, false, - false, ); } @@ -594,6 +663,7 @@ mod test { ); let mut file = tempfile::NamedTempFile::new().unwrap(); apply_chain_range( + ApplyRangeMode::Parallel, store, &genesis, None, @@ -605,7 +675,6 @@ mod test { Some(file.as_file_mut()), false, false, - false, ); let mut csv = String::new(); file.as_file_mut().seek(SeekFrom::Start(0)).unwrap(); diff --git a/tools/state-viewer/src/cli.rs b/tools/state-viewer/src/cli.rs index 0ba82abb385..be85a721081 100644 --- a/tools/state-viewer/src/cli.rs +++ b/tools/state-viewer/src/cli.rs @@ -197,6 +197,25 @@ impl ApplyChunkCmd { } } +#[derive(clap::Parser, Copy, Clone, Debug, Eq, PartialEq)] +pub enum ApplyRangeMode { + /// Applies chunks one after another in order of increasing heights. + /// TODO(#8741): doesn't work. Remove dependency on flat storage + /// by simulating correct costs. Consider reintroducing DbTrieOnly + /// read mode removed at #10490. + Sequential, + /// Applies chunks in parallel. + /// Useful for quick correctness check of applying chunks by comparing + /// results with `ChunkExtra`s. + /// TODO(#8741): doesn't work, same as above. + Parallel, + /// Sequentially applies chunks from flat storage head until chain + /// final head, moving flat head forward. Use in combination with + /// `MoveFlatHeadCmd` and `MoveFlatHeadMode::Back`. + /// Useful for benchmarking. 
+ Benchmarking,
+}
+
#[derive(clap::Parser)]
pub struct ApplyRangeCmd {
#[clap(long)]
@@ -212,14 +231,15 @@ pub struct ApplyRangeCmd {
#[clap(long)]
only_contracts: bool,
#[clap(long)]
- sequential: bool,
- #[clap(long)]
use_flat_storage: bool,
+ #[clap(subcommand)]
+ mode: ApplyRangeMode,
}
impl ApplyRangeCmd {
pub fn run(self, home_dir: &Path, near_config: NearConfig, store: Store) {
apply_range(
+ self.mode,
self.start_index,
self.end_index,
self.shard_id,
@@ -229,7 +249,6 @@ impl ApplyRangeCmd {
near_config,
store,
self.only_contracts,
- self.sequential,
self.use_flat_storage,
);
}
diff --git a/tools/state-viewer/src/commands.rs b/tools/state-viewer/src/commands.rs
index 771764f192a..b4d00a39f49 100644
--- a/tools/state-viewer/src/commands.rs
+++ b/tools/state-viewer/src/commands.rs
@@ -1,4 +1,5 @@
use crate::apply_chain_range::apply_chain_range;
+use crate::cli::ApplyRangeMode;
use crate::contract_accounts::ContractAccount;
use crate::contract_accounts::ContractAccountFilter;
use crate::contract_accounts::Summary;
@@ -204,6 +205,7 @@ pub(crate) fn apply_chunk(
}
pub(crate) fn apply_range(
+ mode: ApplyRangeMode,
start_index: Option<BlockHeight>,
end_index: Option<BlockHeight>,
shard_id: ShardId,
@@ -213,7 +215,6 @@
near_config: NearConfig,
store: Store,
only_contracts: bool,
- sequential: bool,
use_flat_storage: bool,
) {
let mut csv_file = csv_file.map(|filename| std::fs::File::create(filename).unwrap());
@@ -227,6 +228,7 @@
)
.expect("could not create the transaction runtime");
apply_chain_range(
+ mode,
store,
&near_config.genesis,
start_index,
@@ -237,7 +239,6 @@
verbose_output,
csv_file.as_mut(),
only_contracts,
- sequential,
use_flat_storage,
);
}
diff --git a/tools/state-viewer/src/scan_db.rs b/tools/state-viewer/src/scan_db.rs
index bd96193c106..cc602926af5 100644
--- a/tools/state-viewer/src/scan_db.rs
+++ b/tools/state-viewer/src/scan_db.rs
@@ -238,7 +238,7 @@ fn format_block_misc_value<'a>(key: &'a [u8], value: &'a [u8]) -> Box::try_from_slice(value).unwrap())
diff --git a/tools/state-viewer/src/state_parts.rs b/tools/state-viewer/src/state_parts.rs
index c9f4d46f49d..7be89ca60be 100644
--- a/tools/state-viewer/src/state_parts.rs
+++ b/tools/state-viewer/src/state_parts.rs
@@ -549,7 +549,7 @@ fn read_state_header(
}
fn finalize_state_sync(sync_hash: CryptoHash, shard_id: ShardId, chain: &mut Chain) {
- chain.set_state_finalize(shard_id, sync_hash, Ok(())).unwrap()
+ chain.set_state_finalize(shard_id, sync_hash).unwrap()
}
fn get_part_ids(part_from: Option<u64>, part_to: Option<u64>, num_parts: u64) -> Range<u64> {
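(Reviewer note: with `mode` now a clap subcommand on `ApplyRangeCmd`, a benchmarking run would look roughly like the sketch below. The `benchmarking` and `back` spellings assume clap's default kebab-case naming for the `Benchmarking` and `Back` variants, and the `move_flat_head` flag names are assumptions based on `MoveFlatHeadCmd` and `MoveFlatHeadMode::Back`, not taken verbatim from this diff:)

```bash
# Hypothetical flow: rewind the flat head, then re-apply chunks
# sequentially from the flat head up to the chain's final head.
./target/release/neard --home ~/.near/ view_state move_flat_head \
    --shard-id 0 --version 1 back --blocks 100
./target/release/neard --home ~/.near/ view_state apply_range \
    --shard-id 0 --use-flat-storage benchmarking
```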