diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index f2ccaf438ac..c3119db3780 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -5,7 +5,6 @@ on:
     branches:
       - unstable
       - stable
-      - capella
     tags:
       - v*
 
@@ -35,11 +34,6 @@ jobs:
         run: |
           echo "VERSION=latest" >> $GITHUB_ENV
           echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
-      - name: Extract version (if capella)
-        if: github.event.ref == 'refs/heads/capella'
-        run: |
-          echo "VERSION=capella" >> $GITHUB_ENV
-          echo "VERSION_SUFFIX=" >> $GITHUB_ENV
       - name: Extract version (if tagged release)
         if: startsWith(github.event.ref, 'refs/tags')
         run: |
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 4d4e92ae143..8428c0a3b0a 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -7,6 +7,7 @@ on:
   pull_request:
     paths:
       - 'book/**'
+  merge_group:
 
 jobs:
   linkcheck:
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index a522f2efb99..1ca1006c1f2 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -6,6 +6,7 @@ on:
     branches:
       - unstable
   pull_request:
+  merge_group:
 
 jobs:
   run-local-testnet:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 2e63b4d6c24..e6d79bd5ef9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -73,7 +73,7 @@ jobs:
       - uses: KyleMayes/install-llvm-action@v1
         if: startsWith(matrix.arch, 'x86_64-windows')
         with:
-          version: "13.0"
+          version: "15.0"
           directory: ${{ runner.temp }}/llvm
       - name: Set LIBCLANG_PATH
         if: startsWith(matrix.arch, 'x86_64-windows')
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index 445f71fa096..27c91f22620 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -8,19 +8,20 @@ on:
       - trying
       - 'pr/*'
   pull_request:
+  merge_group:
 env:
   # Deny warnings in CI
   # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
   RUSTFLAGS: "-D warnings -C debuginfo=0"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2022-12-15
+  PINNED_NIGHTLY: nightly-2023-04-16
   # Prevent Github API rate limiting.
   LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
   target-branch-check:
     name: target-branch-check
     runs-on: ubuntu-latest
-    if: github.event_name == 'pull_request'
+    if: github.event_name == 'pull_request' || github.event_name == 'merge_group'
     steps:
       - name: Check that the pull request is not targeting the stable branch
         run: test ${{ github.base_ref }} != "stable"
@@ -83,7 +84,7 @@ jobs:
         run: choco install -y make
       - uses: KyleMayes/install-llvm-action@v1
         with:
-          version: "13.0"
+          version: "15.0"
           directory: ${{ runner.temp }}/llvm
       - name: Set LIBCLANG_PATH
         run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
diff --git a/Cargo.lock b/Cargo.lock
index e0fe09ef7ae..a8f6cfe9818 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -88,6 +88,16 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
+ "crypto-common",
+ "generic-array",
+]
+
 [[package]]
 name = "aes"
 version = "0.6.0"
@@ -113,17 +123,14 @@ dependencies = [
 ]
 
 [[package]]
-name = "aes-gcm"
-version = "0.8.0"
+name = "aes"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
 dependencies = [
- "aead 0.3.2",
- "aes 0.6.0",
- "cipher 0.2.5",
- "ctr 0.6.0",
- "ghash 0.3.1",
- "subtle",
+ "cfg-if",
+ "cipher 0.4.4",
+ "cpufeatures",
 ]
 
 [[package]]
@@ -140,6 +147,20 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "aes-gcm"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c"
+dependencies = [
+ "aead 0.5.2",
+ "aes 0.8.2",
+ "cipher 0.4.4",
+ "ctr 0.9.2",
+ "ghash 0.5.0",
+ "subtle",
+]
+
 [[package]]
 name = "aes-soft"
 version = "0.6.4"
@@ -166,16 +187,16 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
 dependencies = [
- "getrandom 0.2.8",
+ "getrandom 0.2.9",
  "once_cell",
  "version_check",
 ]
 
 [[package]]
 name = "aho-corasick"
-version = "0.7.20"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04"
 dependencies = [
  "memchr",
 ]
@@ -205,14 +226,14 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.69"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
 
 [[package]]
 name = "arbitrary"
-version = "1.2.2"
-source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba"
+version = "1.3.0"
+source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991"
 dependencies = [
  "derive_arbitrary",
 ]
@@ -225,9 +246,9 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
 
 [[package]]
 name = "arrayref"
-version = "0.3.6"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
 
 [[package]]
 name = "arrayvec"
@@ -248,14 +269,14 @@ dependencies = [
  "num-traits",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
 name = "asn1-rs"
-version = "0.5.1"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4"
+checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0"
 dependencies = [
  "asn1-rs-derive 0.4.0",
  "asn1-rs-impl",
@@ -264,7 +285,7 @@ dependencies = [
  "num-traits",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
@@ -275,7 +296,7 @@ checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
  "synstructure",
 ]
 
@@ -287,7 +308,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
  "synstructure",
 ]
 
@@ -299,75 +320,75 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "asn1_der"
-version = "0.7.5"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21"
+checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247"
 
 [[package]]
 name = "async-io"
-version = "1.12.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794"
+checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
 dependencies = [
  "async-lock",
  "autocfg 1.1.0",
+ "cfg-if",
  "concurrent-queue",
  "futures-lite",
- "libc",
  "log",
  "parking",
  "polling",
+ "rustix",
  "slab",
- "socket2",
+ "socket2 0.4.9",
  "waker-fn",
- "windows-sys 0.42.0",
 ]
 
 [[package]]
 name = "async-lock"
-version = "2.6.0"
+version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685"
+checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7"
 dependencies = [
  "event-listener",
- "futures-lite",
 ]
 
 [[package]]
 name = "async-stream"
-version = "0.3.3"
+version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e"
+checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
 dependencies = [
  "async-stream-impl",
  "futures-core",
+ "pin-project-lite 0.2.9",
 ]
 
 [[package]]
 name = "async-stream-impl"
-version = "0.3.3"
+version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27"
+checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.15",
 ]
 
 [[package]]
 name = "async-trait"
-version = "0.1.64"
+version = "0.1.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2"
+checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.15",
 ]
 
@@ -396,9 +417,9 @@ dependencies = [
 
 [[package]]
 name = "atomic-waker"
-version = "1.1.0"
+version = "1.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599"
+checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3"
 
 [[package]]
 name = "attohttpc"
@@ -424,14 +445,14 @@ dependencies = [
 
 [[package]]
 name = "auto_impl"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33"
+checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89"
 dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -506,7 +527,7 @@ dependencies = [
  "cc",
  "cfg-if",
  "libc",
- "miniz_oxide",
+ "miniz_oxide 0.6.2",
  "object",
  "rustc-demangle",
 ]
@@ -537,14 +558,14 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
 
 [[package]]
 name = "base64ct"
-version = "1.5.3"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
 
 [[package]]
 name = "beacon-api-client"
 version = "0.1.0"
-source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e"
+source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b"
 dependencies = [
  "ethereum-consensus",
  "http",
@@ -569,10 +590,9 @@ dependencies = [
  "environment",
  "eth1",
  "eth2",
- "eth2_hashing",
- "eth2_ssz",
- "eth2_ssz_derive",
- "eth2_ssz_types",
+ "ethereum_hashing",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "execution_layer",
  "exit-future",
  "fork_choice",
@@ -603,6 +623,7 @@ dependencies = [
  "sloggers",
  "slot_clock",
  "smallvec",
+ "ssz_types",
  "state_processing",
  "store",
  "strum",
@@ -610,6 +631,7 @@ dependencies = [
  "task_executor",
  "tempfile",
  "tokio",
+ "tokio-stream",
  "tree_hash",
  "types",
  "unused_port",
@@ -617,7 +639,7 @@ dependencies = [
 
 [[package]]
 name = "beacon_node"
-version = "3.5.1"
+version = "4.1.0"
 dependencies = [
  "beacon_chain",
  "clap",
@@ -728,9 +750,9 @@ dependencies = [
 
 [[package]]
 name = "block-buffer"
-version = "0.10.3"
+version = "0.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
 dependencies = [
  "generic-array",
 ]
@@ -757,10 +779,10 @@ version = "0.2.0"
 dependencies = [
  "arbitrary",
  "blst",
- "eth2_hashing",
- "eth2_serde_utils",
- "eth2_ssz",
  "ethereum-types 0.14.1",
+ "ethereum_hashing",
+ "ethereum_serde_utils",
+ "ethereum_ssz",
  "hex",
  "milagro_bls",
  "rand 0.7.3",
@@ -783,15 +805,26 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "bollard-stubs"
+version = "1.41.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8"
+dependencies = [
+ "chrono",
+ "serde",
+ "serde_with",
+]
+
 [[package]]
 name = "boot_node"
-version = "3.5.1"
+version = "4.1.0"
 dependencies = [
  "beacon_node",
  "clap",
  "clap_utils",
  "eth2_network_config",
- "eth2_ssz",
+ "ethereum_ssz",
  "hex",
  "lighthouse_network",
  "log",
@@ -830,6 +863,7 @@ name = "builder_client"
 version = "0.1.0"
 dependencies = [
  "eth2",
+ "lighthouse_version",
  "reqwest",
  "sensitive_url",
  "serde",
@@ -838,9 +872,9 @@ dependencies = [
 
 [[package]]
 name = "bumpalo"
-version = "3.12.0"
+version = "3.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
+checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8"
 
 [[package]]
 name = "byte-slice-cast"
@@ -888,14 +922,14 @@ dependencies = [
 name = "cached_tree_hash"
 version = "0.1.0"
 dependencies = [
- "eth2_hashing",
- "eth2_ssz",
- "eth2_ssz_derive",
- "eth2_ssz_types",
  "ethereum-types 0.14.1",
+ "ethereum_hashing",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "quickcheck",
  "quickcheck_macros",
  "smallvec",
+ "ssz_types",
  "tree_hash",
 ]
 
@@ -964,14 +998,15 @@ dependencies = [
 
 [[package]]
 name = "chrono"
-version = "0.4.23"
+version = "0.4.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b"
 dependencies = [
  "iana-time-zone",
  "js-sys",
  "num-integer",
  "num-traits",
+ "serde",
  "time 0.1.45",
  "wasm-bindgen",
  "winapi",
@@ -995,11 +1030,21 @@ dependencies = [
  "generic-array",
 ]
 
+[[package]]
+name = "cipher"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
+dependencies = [
+ "crypto-common",
+ "inout",
+]
+
 [[package]]
 name = "clang-sys"
-version = "1.4.0"
+version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3"
+checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f"
 dependencies = [
  "glob",
  "libc",
@@ -1028,8 +1073,8 @@ dependencies = [
  "clap",
  "dirs",
  "eth2_network_config",
- "eth2_ssz",
  "ethereum-types 0.14.1",
+ "ethereum_ssz",
  "hex",
  "serde",
  "serde_json",
@@ -1072,7 +1117,7 @@ dependencies = [
  "state_processing",
  "store",
  "task_executor",
- "time 0.3.17",
+ "time 0.3.20",
  "timer",
  "tokio",
  "types",
@@ -1080,9 +1125,9 @@ dependencies = [
 
 [[package]]
 name = "cmake"
-version = "0.1.49"
+version = "0.1.50"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c"
+checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130"
 dependencies = [
  "cc",
 ]
@@ -1109,33 +1154,23 @@ name = "compare_fields_derive"
 version = "0.2.0"
 dependencies = [
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "concurrent-queue"
-version = "2.1.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e"
+checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c"
 dependencies = [
  "crossbeam-utils",
 ]
 
-[[package]]
-name = "console_error_panic_hook"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
-dependencies = [
- "cfg-if",
- "wasm-bindgen",
-]
-
 [[package]]
 name = "const-oid"
-version = "0.9.1"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b"
+checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
 
 [[package]]
 name = "convert_case"
@@ -1155,9 +1190,9 @@ dependencies = [
 
 [[package]]
 name = "core-foundation-sys"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
 
 [[package]]
 name = "core2"
@@ -1170,19 +1205,13 @@ dependencies = [
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.5"
+version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
+checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58"
 dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "cpuid-bool"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba"
-
 [[package]]
 name = "crc"
 version = "3.0.1"
@@ -1245,9 +1274,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.7"
+version = "0.5.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
+checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
 dependencies = [
  "cfg-if",
  "crossbeam-utils",
@@ -1255,9 +1284,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
 dependencies = [
  "cfg-if",
  "crossbeam-epoch",
@@ -1266,22 +1295,22 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.13"
+version = "0.9.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
 dependencies = [
  "autocfg 1.1.0",
  "cfg-if",
  "crossbeam-utils",
- "memoffset 0.7.1",
+ "memoffset 0.8.0",
  "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.14"
+version = "0.8.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
 dependencies = [
  "cfg-if",
 ]
@@ -1311,6 +1340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array",
+ "rand_core 0.6.4",
  "typenum",
 ]
 
@@ -1324,16 +1354,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "crypto-mac"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a"
-dependencies = [
- "generic-array",
- "subtle",
-]
-
 [[package]]
 name = "crypto-mac"
 version = "0.11.1"
@@ -1346,9 +1366,9 @@ dependencies = [
 
 [[package]]
 name = "csv"
-version = "1.2.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359"
+checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad"
 dependencies = [
  "csv-core",
  "itoa",
@@ -1367,20 +1387,20 @@ dependencies = [
 
 [[package]]
 name = "ctr"
-version = "0.6.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f"
+checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
 dependencies = [
- "cipher 0.2.5",
+ "cipher 0.3.0",
 ]
 
 [[package]]
 name = "ctr"
-version = "0.8.0"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
 dependencies = [
- "cipher 0.3.0",
+ "cipher 0.4.4",
 ]
 
@@ -1408,9 +1428,9 @@ dependencies = [
 
 [[package]]
 name = "curve25519-dalek"
-version = "4.0.0-rc.0"
+version = "4.0.0-rc.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da00a7a9a4eb92a0a0f8e75660926d48f0d0f3c537e455c457bcdaa1e16b1ac"
+checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16"
 dependencies = [
  "cfg-if",
  "fiat-crypto",
@@ -1422,9 +1442,9 @@ dependencies = [
 
 [[package]]
 name = "cxx"
-version = "1.0.90"
+version = "1.0.94"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8"
+checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93"
 dependencies = [
  "cc",
  "cxxbridge-flags",
@@ -1434,9 +1454,9 @@ dependencies = [
 
 [[package]]
 name = "cxx-build"
-version = "1.0.90"
+version = "1.0.94"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38"
+checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b"
 dependencies = [
  "cc",
  "codespan-reporting",
@@ -1444,24 +1464,24 @@ dependencies = [
  "proc-macro2",
  "quote",
  "scratch",
- "syn",
+ "syn 2.0.15",
 ]
 
 [[package]]
 name = "cxxbridge-flags"
-version = "1.0.90"
+version = "1.0.94"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03"
+checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb"
 
 [[package]]
 name = "cxxbridge-macro"
-version = "1.0.90"
+version = "1.0.94"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263"
+checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.15",
 ]
 
@@ -1476,12 +1496,12 @@ dependencies = [
 
 [[package]]
 name = "darling"
-version = "0.14.3"
+version = "0.14.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8"
+checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
 dependencies = [
- "darling_core 0.14.3",
- "darling_macro 0.14.3",
+ "darling_core 0.14.4",
+ "darling_macro 0.14.4",
 ]
 
@@ -1495,21 +1515,21 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strsim 0.10.0",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "darling_core"
-version = "0.14.3"
+version = "0.14.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb"
+checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
 dependencies = [
  "fnv",
  "ident_case",
  "proc-macro2",
  "quote",
  "strsim 0.10.0",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1520,18 +1540,18 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
 dependencies = [
  "darling_core 0.13.4",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "darling_macro"
-version = "0.14.3"
+version = "0.14.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685"
+checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
 dependencies = [
- "darling_core 0.14.3",
+ "darling_core 0.14.4",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1577,7 +1597,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db"
 dependencies = [
  "data-encoding",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1606,20 +1626,20 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f"
 
 [[package]]
 name = "delay_map"
-version = "0.1.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c4d75d3abfe4830dcbf9bcb1b926954e121669f74dd1ca7aa0183b1755d83f6"
+checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8"
 dependencies = [
  "futures",
- "tokio-util 0.6.10",
+ "tokio-util 0.7.8",
 ]
 
 [[package]]
 name = "deposit_contract"
 version = "0.2.0"
 dependencies = [
- "eth2_ssz",
  "ethabi 16.0.0",
+ "ethereum_ssz",
  "hex",
  "reqwest",
  "serde_json",
@@ -1655,11 +1675,11 @@ dependencies = [
 
 [[package]]
 name = "der-parser"
-version = "8.1.0"
+version = "8.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1"
+checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e"
 dependencies = [
- "asn1-rs 0.5.1",
+ "asn1-rs 0.5.2",
  "displaydoc",
  "nom 7.1.3",
  "num-bigint",
@@ -1675,18 +1695,17 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "derive_arbitrary"
-version = "1.2.2"
-source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba"
+version = "1.3.0"
+source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991"
 dependencies = [
- "darling 0.14.3",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1704,10 +1723,10 @@ version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4"
 dependencies = [
- "darling 0.14.3",
+ "darling 0.14.4",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1717,7 +1736,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68"
 dependencies = [
  "derive_builder_core",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -1730,7 +1749,44 @@ dependencies = [
  "proc-macro2",
  "quote",
  "rustc_version 0.4.0",
- "syn",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "diesel"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72eb77396836a4505da85bae0712fa324b74acfe1876d7c2f7e694ef3d0ee373"
+dependencies = [
+ "bitflags",
+ "byteorder",
+ "diesel_derives",
+ "itoa",
+ "pq-sys",
+ "r2d2",
+]
+
+[[package]]
+name = "diesel_derives"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4"
+dependencies = [
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "diesel_migrations"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2"
+dependencies = [
+ "diesel",
+ "migrations_internals",
+ "migrations_macros",
 ]
 
@@ -1748,7 +1804,7 @@ version = "0.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
 dependencies = [
- "block-buffer 0.10.3",
+ "block-buffer 0.10.4",
  "crypto-common",
  "subtle",
 ]
 
@@ -1805,15 +1861,15 @@ dependencies = [
 
 [[package]]
 name = "discv5"
-version = "0.1.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d767c0e59b3e8d65222d95df723cc2ea1da92bb0f27c563607e6f0bde064f255"
+checksum = "b009a99b85b58900df46435307fc5c4c845af7e182582b1fbf869572fa9fce69"
 dependencies = [
  "aes 0.7.5",
  "aes-gcm 0.9.4",
  "arrayvec",
  "delay_map",
- "enr",
+ "enr 0.7.0",
  "fnv",
  "futures",
  "hashlink 0.7.0",
@@ -1827,7 +1883,7 @@ dependencies = [
  "rand 0.8.5",
  "rlp",
  "smallvec",
- "socket2",
+ "socket2 0.4.9",
  "tokio",
  "tokio-stream",
  "tokio-util 0.6.10",
@@ -1845,14 +1901,14 @@ checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "dtoa"
-version = "1.0.5"
+version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313"
+checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169"
 
 [[package]]
 name = "ecdsa"
@@ -1899,9 +1955,9 @@ dependencies = [
  "compare_fields",
  "compare_fields_derive",
  "derivative",
- "eth2_ssz",
- "eth2_ssz_derive",
  "ethereum-types 0.14.1",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "execution_layer",
  "fork_choice",
  "fs2",
@@ -1962,6 +2018,25 @@ name = "enr"
 version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56"
+dependencies = [
+ "base64 0.13.1",
+ "bs58",
+ "bytes",
+ "hex",
+ "k256",
+ "log",
+ "rand 0.8.5",
+ "rlp",
+ "serde",
+ "sha3 0.10.7",
+ "zeroize",
+]
+
+[[package]]
+name = "enr"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad"
 dependencies = [
  "base64 0.13.1",
  "bs58",
@@ -1973,7 +2048,7 @@ dependencies = [
  "rand 0.8.5",
  "rlp",
  "serde",
- "sha3 0.10.6",
+ "sha3 0.10.7",
  "zeroize",
 ]
 
@@ -1986,7 +2061,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -2036,13 +2111,13 @@ dependencies = [
 
 [[package]]
 name = "errno"
-version = "0.2.8"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
 dependencies = [
  "errno-dragonfly",
  "libc",
- "winapi",
+ "windows-sys 0.48.0",
 ]
 
@@ -2072,8 +2147,8 @@ dependencies = [
  "environment",
  "eth1_test_rig",
  "eth2",
- "eth2_ssz",
- "eth2_ssz_derive",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "execution_layer",
  "futures",
  "hex",
@@ -2116,9 +2191,9 @@ dependencies = [
  "account_utils",
  "bytes",
  "eth2_keystore",
- "eth2_serde_utils",
- "eth2_ssz",
- "eth2_ssz_derive",
+ "ethereum_serde_utils",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "futures",
  "futures-util",
  "libsecp256k1",
@@ -2145,25 +2220,13 @@ dependencies = [
  "types",
 ]
 
-[[package]]
-name = "eth2_hashing"
-version = "0.3.0"
-dependencies = [
- "cpufeatures",
- "lazy_static",
- "ring",
- "rustc-hex",
- "sha2 0.10.6",
- "wasm-bindgen-test",
-]
-
 [[package]]
 name = "eth2_interop_keypairs"
 version = "0.2.0"
 dependencies = [
  "base64 0.13.1",
  "bls",
- "eth2_hashing",
+ "ethereum_hashing",
  "hex",
  "lazy_static",
  "num-bigint",
@@ -2210,64 +2273,15 @@ dependencies = [
 name = "eth2_network_config"
 version = "0.2.0"
 dependencies = [
- "enr",
+ "discv5",
  "eth2_config",
- "eth2_ssz",
+ "ethereum_ssz",
  "serde_yaml",
  "tempfile",
  "types",
  "zip",
 ]
 
-[[package]]
-name = "eth2_serde_utils"
-version = "0.1.1"
-dependencies = [
- "ethereum-types 0.14.1",
- "hex",
- "serde",
- "serde_derive",
- "serde_json",
-]
-
-[[package]]
-name = "eth2_ssz"
-version = "0.4.1"
-dependencies = [
- "eth2_ssz_derive",
- "ethereum-types 0.14.1",
- "itertools",
- "smallvec",
-]
-
-[[package]]
-name = "eth2_ssz_derive"
-version = "0.3.1"
-dependencies = [
- "darling 0.13.4",
- "eth2_ssz",
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "eth2_ssz_types"
-version = "0.2.2"
-dependencies = [
- "arbitrary",
- "derivative",
- "eth2_serde_utils",
- "eth2_ssz",
- "serde",
- "serde_derive",
- "serde_json",
- "smallvec",
- "tree_hash",
- "tree_hash_derive",
- "typenum",
-]
-
 [[package]]
 name = "eth2_wallet"
 version = "0.1.0"
@@ -2320,7 +2334,7 @@ dependencies = [
  "regex",
  "serde",
  "serde_json",
- "sha3 0.10.6",
+ "sha3 0.10.7",
  "thiserror",
  "uint",
 ]
@@ -2361,11 +2375,11 @@ dependencies = [
  "async-stream",
  "blst",
  "bs58",
- "enr",
+ "enr 0.6.2",
  "hex",
  "integer-sqrt",
  "multiaddr 0.14.0",
- "multihash",
+ "multihash 0.16.3",
  "rand 0.8.5",
  "serde",
  "serde_json",
@@ -2406,6 +2420,54 @@ dependencies = [
  "uint",
 ]
 
+[[package]]
+name = "ethereum_hashing"
+version = "1.0.0-beta.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35"
+dependencies = [
+ "cpufeatures",
+ "lazy_static",
+ "ring",
+ "sha2 0.10.6",
+]
+
+[[package]]
+name = "ethereum_serde_utils"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f8cb04ea380a33e9c269fa5f8df6f2d63dee19728235f3e639e7674e038686a"
+dependencies = [
+ "ethereum-types 0.14.1",
+ "hex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+]
+
+[[package]]
+name = "ethereum_ssz"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32749e96305376af40d7a7ee8ea4c4c64c68d09ff94a81ab78c8d9bc7153c221"
+dependencies = [
+ "ethereum-types 0.14.1",
+ "itertools",
+ "smallvec",
+]
+
+[[package]]
+name = "ethereum_ssz_derive"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9cac7ef2107926cea34c0064056f9bb134d2085eef882388d151d2e59174cf0"
+dependencies = [
+ "darling 0.13.4",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "ethers-core"
 version = "1.0.2"
@@ -2446,7 +2508,7 @@ dependencies = [
  "futures-core",
  "futures-timer",
  "futures-util",
- "getrandom 0.2.8",
+ "getrandom 0.2.9",
  "hashers",
  "hex",
  "http",
@@ -2508,10 +2570,9 @@ dependencies = [
  "bytes",
  "environment",
  "eth2",
- "eth2_serde_utils",
- "eth2_ssz",
- "eth2_ssz_types",
  "ethereum-consensus",
+ "ethereum_serde_utils",
+ "ethereum_ssz",
  "ethers-core",
  "exit-future",
  "fork_choice",
@@ -2534,6 +2595,7 @@ dependencies = [
  "slog",
  "slot_clock",
  "ssz-rs",
+ "ssz_types",
  "state_processing",
  "strum",
  "superstruct 0.6.0",
@@ -2597,18 +2659,18 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a"
 
 [[package]]
 name = "fiat-crypto"
-version = "0.1.17"
+version = "0.1.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90"
+checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77"
 
 [[package]]
 name = "field-offset"
-version = "0.3.4"
+version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e1c54951450cbd39f3dbcf1005ac413b49487dabf18a720ad2383eccfeffb92"
+checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535"
 dependencies = [
- "memoffset 0.6.5",
- "rustc_version 0.3.3",
+ "memoffset 0.8.0",
+ "rustc_version 0.4.0",
 ]
 
@@ -2652,13 +2714,13 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flate2"
-version = "1.0.25"
+version = "1.0.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
 dependencies = [
  "crc32fast",
  "libz-sys",
- "miniz_oxide",
+ "miniz_oxide 0.7.1",
 ]
 
@@ -2687,8 +2749,8 @@ name = "fork_choice"
 version = "0.1.0"
 dependencies = [
  "beacon_chain",
- "eth2_ssz",
- "eth2_ssz_derive",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "proto_array",
  "slog",
  "state_processing",
@@ -2730,9 +2792,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
 
 [[package]]
 name = "futures"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84"
+checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
 dependencies = [
  "futures-channel",
  "futures-core",
@@ -2745,9 +2807,9 @@ dependencies = [
 
 [[package]]
 name = "futures-channel"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5"
+checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
 dependencies = [
  "futures-core",
  "futures-sink",
 ]
 
@@ -2755,15 +2817,15 @@ dependencies = [
 
 [[package]]
 name = "futures-core"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
+checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
 
 [[package]]
 name = "futures-executor"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e"
+checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0"
 dependencies = [
  "futures-core",
  "futures-task",
@@ -2773,15 +2835,15 @@ dependencies = [
 
 [[package]]
 name = "futures-io"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531"
+checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
 
 [[package]]
 name = "futures-lite"
-version = "1.12.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48"
+checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce"
 dependencies = [
  "fastrand",
  "futures-core",
@@ -2794,13 +2856,13 @@ dependencies = [
 
 [[package]]
 name = "futures-macro"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70"
+checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.15",
 ]
 
@@ -2816,15 +2878,15 @@ dependencies = [
 
 [[package]]
 name = "futures-sink"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364"
+checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
 
 [[package]]
 name = "futures-task"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366"
+checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
 
 [[package]]
 name = "futures-timer"
@@ -2834,9 +2896,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
 
 [[package]]
 name = "futures-util"
-version = "0.3.26"
+version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1"
+checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
 dependencies = [
  "futures-channel",
  "futures-core",
@@ -2861,9 +2923,9 @@ dependencies = [
 
 [[package]]
 name = "generic-array"
-version = "0.14.6"
+version = "0.14.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
 dependencies = [
  "typenum",
  "version_check",
@@ -2876,8 +2938,8 @@ dependencies = [
  "environment",
  "eth1",
  "eth1_test_rig",
- "eth2_hashing",
- "eth2_ssz",
+ "ethereum_hashing",
+ "ethereum_ssz",
  "futures",
  "int_to_bytes",
  "merkle_proof",
@@ -2905,9 +2967,9 @@ dependencies = [
 
 [[package]]
 name = "getrandom"
-version = "0.2.8"
+version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4"
 dependencies = [
  "cfg-if",
  "js-sys",
@@ -2918,29 +2980,29 @@ dependencies = [
 
 [[package]]
 name = "ghash"
-version = "0.3.1"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375"
+checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
 dependencies = [
  "opaque-debug",
- "polyval 0.4.5",
+ "polyval 0.5.3",
 ]
 
 [[package]]
 name = "ghash"
-version = "0.4.4"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
+checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40"
 dependencies = [
  "opaque-debug",
- "polyval 0.5.3",
+ "polyval 0.6.0",
 ]
 
 [[package]]
 name = "gimli"
-version = "0.27.1"
+version = "0.27.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec"
+checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
 
 [[package]]
 name = "git-version"
@@ -2961,7 +3023,7 @@ dependencies = [
  "proc-macro-hack",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -2983,9 +3045,9 @@ dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.3.15"
+version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4"
+checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21"
 dependencies = [
  "bytes",
  "fnv",
@@ -2996,7 +3058,7 @@ dependencies = [
  "indexmap",
  "slab",
  "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.8",
  "tracing",
 ]
 
@@ -3115,6 +3177,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "hermit-abi"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+
 [[package]]
 name = "hex"
 version = "0.4.3"
@@ -3146,16 +3214,6 @@ dependencies = [
  "digest 0.9.0",
 ]
 
-[[package]]
-name = "hmac"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15"
-dependencies = [
- "crypto-mac 0.10.1",
- "digest 0.9.0",
-]
-
 [[package]]
 name = "hmac"
 version = "0.11.0"
@@ -3199,9 +3257,9 @@ dependencies = [
 
 [[package]]
 name = "http"
-version = "0.2.8"
+version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
+checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
 dependencies = [
  "bytes",
  "fnv",
@@ -3235,8 +3293,8 @@ dependencies = [
  "environment",
  "eth1",
  "eth2",
- "eth2_serde_utils",
- "eth2_ssz",
+ "ethereum_serde_utils",
+ "ethereum_ssz",
  "execution_layer",
  "futures",
  "genesis",
@@ -3312,9 +3370,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
 [[package]]
 name = "hyper"
-version = "0.14.24"
+version = "0.14.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c"
+checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -3327,7 +3385,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite 0.2.9",
- "socket2",
+ "socket2 0.4.9",
  "tokio",
  "tower-service",
  "tracing",
@@ -3362,16 +3420,16 @@ dependencies = [
 
 [[package]]
 name = "iana-time-zone"
-version = "0.1.53"
+version = "0.1.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
  "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
- "winapi",
+ "windows 0.48.0",
 ]
 
@@ -3444,9 +3502,9 @@ dependencies = [
 
 [[package]]
 name = "if-watch"
-version = "3.0.0"
+version = "3.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e"
+checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f"
 dependencies = [
  "async-io",
  "core-foundation",
@@ -3458,7 +3516,7 @@ dependencies = [
  "rtnetlink",
  "system-configuration",
  "tokio",
- "windows",
+ "windows 0.34.0",
 ]
 
@@ -3527,19 +3585,28 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "indexmap"
-version = "1.9.2"
+version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
 dependencies = [
  "autocfg 1.1.0",
  "hashbrown 0.12.3",
 ]
 
+[[package]]
+name = "inout"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
+dependencies = [
+ "generic-array",
+]
+
 [[package]]
 name = "instant"
 version = "0.1.12"
@@ -3591,12 +3658,13 @@ dependencies = [
 
 [[package]]
 name = "io-lifetimes"
-version = "1.0.5"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3"
+checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220"
 dependencies = [
+ "hermit-abi 0.3.1",
  "libc",
- "windows-sys 0.45.0",
+ "windows-sys 0.48.0",
 ]
 
@@ -3605,7 +3673,7 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be"
 dependencies = [
- "socket2",
+ "socket2 0.4.9",
  "widestring 0.5.1",
  "winapi",
  "winreg",
@@ -3613,9 +3681,9 @@ dependencies = [
 
 [[package]]
 name = "ipnet"
-version = "2.7.1"
+version = "2.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146"
+checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f"
 
 [[package]]
 name = "itertools"
@@ -3628,9 +3696,9 @@ dependencies = [
  "either",
 ]
 
 [[package]]
 name = "itoa"
-version = "1.0.5"
+version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
+checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
 
 [[package]]
 name = "jemalloc-ctl"
@@ -3689,11 +3757,11 @@ dependencies = [
 
 [[package]]
 name = "jsonwebtoken"
-version = "8.2.0"
+version = "8.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828"
+checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
 dependencies = [
- "base64 0.13.1",
+ "base64 0.21.0",
  "pem",
  "ring",
  "serde",
@@ -3711,7 +3779,7 @@ dependencies = [
  "ecdsa",
  "elliptic-curve",
  "sha2 0.10.6",
- "sha3 0.10.6",
+ "sha3 0.10.7",
 ]
 
 [[package]]
@@ -3750,7 +3818,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "lcli"
-version = "3.5.1"
+version = "4.1.0"
 dependencies = [
  "account_utils",
  "beacon_chain",
@@ -3764,8 +3832,8 @@ dependencies = [
  "eth1_test_rig",
  "eth2",
  "eth2_network_config",
- "eth2_ssz",
  "eth2_wallet",
+ "ethereum_ssz",
  "genesis",
  "int_to_bytes",
  "lighthouse_network",
@@ -3810,15 +3878,15 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.139"
+version = "0.2.142"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
+checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317"
 
 [[package]]
 name = "libflate"
-version = "1.2.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093"
+checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0"
 dependencies = [
  "adler32",
  "crc32fast",
@@ -3827,9 +3895,9 @@ dependencies = [
 
 [[package]]
 name = "libflate_lz77"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a"
+checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf"
 dependencies = [
  "rle-decode-fast",
 ]
 
@@ -3873,14 +3941,14 @@ dependencies = [
 
 [[package]]
 name = "libp2p"
-version = "0.50.0"
+version = "0.50.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e0a0d2f693675f49ded13c5d510c48b78069e23cbd9108d7ccd59f6dc568819"
+checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8"
 dependencies = [
  "bytes",
  "futures",
  "futures-timer",
- "getrandom 0.2.8",
+ "getrandom 0.2.9",
  "instant",
  "libp2p-core 0.38.0",
  "libp2p-dns",
@@ -3921,7 +3989,7 @@ dependencies = [
  "libsecp256k1",
  "log",
  "multiaddr 0.14.0",
- "multihash",
+ "multihash 0.16.3",
  "multistream-select 0.11.0",
  "p256",
  "parking_lot 0.12.1",
@@ -3955,7 +4023,7 @@ dependencies = [
  "libsecp256k1",
  "log",
  "multiaddr 0.16.0",
- "multihash",
+ "multihash 0.16.3",
  "multistream-select 0.12.1",
  "once_cell",
  "p256",
@@ -3974,6 +4042,34 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "libp2p-core"
+version = "0.39.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2"
+dependencies = [
+ "either",
+ "fnv",
+ "futures",
+ "futures-timer",
+ "instant",
+ "libp2p-identity",
+ "log",
+ "multiaddr 0.17.1",
+ "multihash 0.17.0",
+ "multistream-select 0.12.1",
+ "once_cell",
+ "parking_lot 0.12.1",
+ "pin-project",
+ "quick-protobuf",
+ "rand 0.8.5",
+ "rw-stream-sink",
+ "smallvec",
+ "thiserror",
+ "unsigned-varint 0.7.1",
+ "void",
+]
+
 [[package]]
 name = "libp2p-dns"
 version = "0.38.0"
@@ -4039,6 +4135,24 @@ dependencies = [
  "void",
 ]
 
+[[package]]
+name = "libp2p-identity"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1"
+dependencies = [
+ "bs58",
+ "ed25519-dalek",
+ "log",
+ "multiaddr 0.17.1",
+ "multihash 0.17.0",
+ "quick-protobuf",
+ "rand 0.8.5",
+ "sha2 0.10.6",
+ "thiserror",
+ "zeroize",
+]
+
 [[package]]
 name = "libp2p-mdns"
 version = "0.42.0"
@@ -4053,7 +4167,7 @@ dependencies = [
  "log",
  "rand 0.8.5",
  "smallvec",
- "socket2",
+ "socket2 0.4.9",
  "tokio",
  "trust-dns-proto",
  "void",
@@ -4181,7 +4295,7 @@ checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400"
 dependencies = [
  "heck",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -4196,19 +4310,20 @@ dependencies = [
  "libc",
  "libp2p-core 0.38.0",
  "log",
- "socket2",
+ "socket2 0.4.9",
  "tokio",
 ]
 
 [[package]]
 name = "libp2p-tls"
-version = "0.1.0-alpha"
+version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8"
+checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781"
 dependencies = [
  "futures",
  "futures-rustls",
- "libp2p-core 0.38.0",
+ "libp2p-core 0.39.2",
+ "libp2p-identity",
  "rcgen 0.10.0",
  "ring",
  "rustls 0.20.8",
@@ -4234,7 +4349,7 @@ dependencies = [
  "libp2p-core 0.38.0",
  "libp2p-noise",
  "log",
- "multihash",
+ "multihash 0.16.3",
  "prost",
  "prost-build",
  "prost-codec",
@@ -4245,7 +4360,7 @@ dependencies = [
  "thiserror",
  "tinytemplate",
  "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.8",
  "webrtc",
 ]
 
@@ -4343,9 +4458,9 @@ dependencies = [
 
 [[package]]
 name = "libz-sys"
-version = "1.1.8"
+version = "1.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf"
+checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db"
 dependencies = [
  "cc",
  "pkg-config",
@@ -4354,7 +4469,7 @@ dependencies = [
 
 [[package]]
 name = "lighthouse"
-version = "3.5.1"
+version = "4.1.0"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -4368,8 +4483,8 @@ dependencies = [
  "env_logger 0.9.3",
  "environment",
  "eth1",
- "eth2_hashing",
  "eth2_network_config",
+ "ethereum_hashing",
  "futures",
  "lazy_static",
  "lighthouse_metrics",
@@ -4409,9 +4524,8 @@ dependencies = [
  "dirs",
  "discv5",
  "error-chain",
- "eth2_ssz",
- "eth2_ssz_derive",
- "eth2_ssz_types",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "exit-future",
  "fnv",
  "futures",
@@ -4436,6 +4550,7 @@ dependencies = [
  "slog-term",
  "smallvec",
  "snap",
+ "ssz_types",
  "strum",
  "superstruct 0.5.0",
  "task_executor",
@@ -4478,9 +4593,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.1.4"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
+checksum = "b64f40e5e03e0d54f03845c8197d0291253cdbedfb1cb46b13c2c117554a9f4c"
 
 [[package]]
 name = "lmdb-rkv"
@@ -4673,9 +4788,9 @@ dependencies = [
 
 [[package]]
 name = "memoffset"
-version = "0.7.1"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
 dependencies = [
  "autocfg 1.1.0",
 ]
 
@@ -4684,8 +4799,8 @@ dependencies = [
 name = "merkle_proof"
 version = "0.2.0"
 dependencies = [
- "eth2_hashing",
  "ethereum-types 0.14.1",
+ "ethereum_hashing",
  "lazy_static",
  "quickcheck",
  "quickcheck_macros",
@@ -4712,7 +4827,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "smallvec",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -4732,6 +4847,27 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "migrations_internals"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc"
+dependencies = [
+ "serde",
+ "toml",
+]
+
+[[package]]
+name = "migrations_macros"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58"
+dependencies = [
+ "migrations_internals",
+ "proc-macro2",
+ "quote",
+]
+
 [[package]]
 name = "milagro_bls"
 version = "1.4.2"
@@ -4746,9 +4882,9 @@ dependencies = [
 
 [[package]]
 name = "mime"
-version = "0.3.16"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
 
 [[package]]
 name = "mime_guess"
@@ -4775,6 +4911,15 @@ dependencies = [
  "adler",
 ]
 
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+]
+
 [[package]]
 name = "mio"
 version = "0.8.6"
@@ -4823,7 +4968,7 @@ dependencies = [
  "bs58",
  "byteorder",
  "data-encoding",
- "multihash",
+ "multihash 0.16.3",
  "percent-encoding",
  "serde",
  "static_assertions",
@@ -4841,7 +4986,26 @@ dependencies = [
  "byteorder",
  "data-encoding",
  "multibase",
- "multihash",
+ "multihash 0.16.3",
  "percent-encoding",
  "serde",
  "static_assertions",
  "unsigned-varint 0.7.1",
  "url",
 ]
+
+[[package]]
+name = "multiaddr"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd"
+dependencies = [
+ "arrayref",
+ "byteorder",
+ "data-encoding",
+ "log",
+ "multibase",
+ "multihash 0.17.0",
+ "percent-encoding",
+ "serde",
+ "static_assertions",
+ "unsigned-varint 0.7.1",
+ "url",
+]
 
@@ -4873,6 +5037,17 @@ dependencies = [
  "unsigned-varint 0.7.1",
 ]
 
+[[package]]
+name = "multihash"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40"
+dependencies = [
+ "core2",
+ "multihash-derive",
+ "unsigned-varint 0.7.1",
+]
+
 [[package]]
 name = "multihash-derive"
 version = "0.8.1"
@@ -4883,7 +5058,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
  "synstructure",
 ]
 
@@ -5012,9 +5187,9 @@ dependencies = [
 
 [[package]]
 name = "netlink-sys"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b"
+checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411"
 dependencies = [
  "bytes",
  "futures",
@@ -5032,9 +5207,8 @@ dependencies = [
  "derivative",
  "environment",
  "error-chain",
- "eth2_ssz",
- "eth2_ssz_types",
  "ethereum-types 0.14.1",
+ "ethereum_ssz",
  "execution_layer",
  "exit-future",
  "fnv",
@@ -5060,6 +5234,7 @@ dependencies = [
  "sloggers",
  "slot_clock",
  "smallvec",
+ "ssz_types",
  "store",
  "strum",
  "task_executor",
@@ -5145,9 +5320,9 @@ dependencies = [
 
 [[package]]
 name = "ntapi"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc"
+checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
 dependencies = [
  "winapi",
 ]
 
@@ -5265,7 +5440,7 @@ version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff"
 dependencies = [
- "asn1-rs 0.5.1",
+ "asn1-rs 0.5.2",
 ]
 
@@ -5315,14 +5490,14 @@ dependencies = [
  "bytes",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "openssl"
-version = "0.10.45"
+version = "0.10.52"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1"
+checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56"
 dependencies = [
  "bitflags",
  "cfg-if",
@@ -5335,13 +5510,13 @@ dependencies = [
 
 [[package]]
 name = "openssl-macros"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c"
+checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.15",
 ]
 
@@ -5352,20 +5527,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
 [[package]]
 name = "openssl-src"
-version = "111.25.1+1.1.1t"
+version = "111.25.3+1.1.1t"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10"
+checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c"
 dependencies = [
  "cc",
 ]
 
 [[package]]
 name = "openssl-sys"
-version = "0.9.80"
+version = "0.9.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7"
+checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e"
 dependencies = [
- "autocfg 1.1.0",
  "cc",
  "libc",
  "openssl-src",
@@ -5380,8 +5554,8 @@ dependencies = [
  "beacon_chain",
  "bitvec 1.0.1",
  "derivative",
- "eth2_ssz",
- "eth2_ssz_derive",
+ "ethereum_ssz",
+ "ethereum_ssz_derive",
  "itertools",
  "lazy_static",
  "lighthouse_metrics",
@@ -5472,7 +5646,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
@@ -5484,14 +5658,14 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "parking"
-version = "2.0.0"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
+checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e"
 
 [[package]]
 name = "parking_lot"
@@ -5523,7 +5697,7 @@ dependencies = [
  "cfg-if",
  "instant",
  "libc",
- "redox_syscall",
+ "redox_syscall 0.2.16",
  "smallvec",
  "winapi",
 ]
 
@@ -5536,16 +5710,16 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
 dependencies = [
  "cfg-if",
  "libc",
- "redox_syscall",
+ "redox_syscall 0.2.16",
  "smallvec",
  "windows-sys 0.45.0",
 ]
 
 [[package]]
 name = "paste"
-version = "1.0.11"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
+checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
 
 [[package]]
 name = "pbkdf2"
@@ -5595,16 +5769,6 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
-[[package]]
-name = "pest"
-version = "2.5.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660"
-dependencies = [
- "thiserror",
- "ucd-trie",
-]
-
 [[package]]
 name = "petgraph"
version = "0.6.3" @@ -5625,6 +5789,24 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -5642,7 +5824,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5721,16 +5903,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite 0.2.9", + "windows-sys 0.48.0", ] [[package]] @@ -5741,30 +5925,60 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] name = "polyval" -version = "0.4.5" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cpuid-bool", + "cfg-if", + "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] name = "polyval" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.5.0", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +dependencies = [ + "base64 0.21.0", + "byteorder", + "bytes", + "fallible-iterator", + "hmac 0.12.1", + "md-5", + "memchr", + "rand 0.8.5", + "sha2 0.10.6", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", ] [[package]] @@ -5773,14 +5987,23 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + [[package]] name = "prettyplease" -version = "0.1.23" +version = "0.1.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -5829,7 +6052,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -5852,9 +6075,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -5906,14 +6129,14 @@ checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -5921,9 +6144,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -5936,7 +6159,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] @@ -5956,24 +6179,23 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes", "prost", ] @@ -5981,8 +6203,8 @@ dependencies = [ name = "proto_array" version = "0.2.0" dependencies = [ - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "safe_arith", "serde", "serde_derive", @@ -6021,6 +6243,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -6041,7 +6272,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6057,9 +6288,9 @@ dependencies = [ [[package]] name = "quinn-proto" 
-version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", @@ -6075,9 +6306,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -6174,7 +6405,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -6197,9 +6428,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -6207,9 +6438,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6225,7 +6456,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "x509-parser 0.13.2", "yasna", ] @@ -6238,7 +6469,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "yasna", ] @@ -6251,26 +6482,35 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -6279,20 +6519,26 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" 
+version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" dependencies = [ "base64 0.21.0", "bytes", @@ -6321,7 +6567,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", @@ -6392,7 +6638,7 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6461,9 +6707,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -6486,22 +6732,13 @@ dependencies = [ "semver 0.9.0", ] -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", ] [[package]] @@ -6515,16 +6752,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "8bbfc1d1c7c40c01715f47d71444744a81669ca84e8b63e25a55e169b1f86433" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6563,9 +6800,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" @@ -6580,9 +6817,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe_arith" @@ -6614,9 +6851,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" dependencies = [ "cfg-if", "derive_more", @@ -6626,14 +6863,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.3.1" 
+version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6647,9 +6884,9 @@ dependencies = [ [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -6668,9 +6905,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -6773,27 +7010,18 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.11.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.10.2", + "semver-parser", ] [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -6801,15 +7029,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6826,9 +7045,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.154" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -6855,20 +7074,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.154" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -6877,13 +7096,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -6917,7 +7136,7 @@ dependencies = [ "darling 0.13.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7005,9 +7224,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" dependencies = [ "digest 0.10.6", "keccak", @@ -7056,7 +7275,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7077,11 +7296,17 @@ dependencies = [ "types", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -7092,8 +7317,8 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "filesystem", "flate2", "lazy_static", @@ -7141,7 +7366,7 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", - "eth2_serde_utils", + "ethereum_serde_utils", "filesystem", "lazy_static", "r2d2", @@ -7182,7 +7407,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7227,7 +7452,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7278,14 +7503,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ba5f4d4ff12bdb6a169ed51b7c48c0e0ac4b0b4b31012b2571e97d78d3201d" +checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.0", + "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7295,14 +7520,24 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b" +dependencies 
= [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -7356,7 +7591,25 @@ source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28e dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "ssz_types" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8052a1004e979c0be24b9e55940195553103cc57d0b34f7e2c4e32793325e402" +dependencies = [ + "arbitrary", + "derivative", + "ethereum_serde_utils", + "ethereum_ssz", + "itertools", + "serde", + "serde_derive", + "smallvec", + "tree_hash", + "typenum", ] [[package]] @@ -7368,10 +7621,9 @@ dependencies = [ "bls", "derivative", "env_logger 0.9.3", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "int_to_bytes", "integer-sqrt", "itertools", @@ -7381,6 +7633,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "ssz_types", "tokio", "tree_hash", "types", @@ -7391,7 +7644,7 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz", + "ethereum_ssz", "lazy_static", "state_processing", "tokio", @@ -7411,8 +7664,8 @@ dependencies = [ "beacon_chain", "db-key", "directory", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "itertools", "lazy_static", "leveldb", @@ -7429,6 +7682,16 @@ dependencies = [ "types", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.8.0" @@ -7460,7 +7723,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -7508,7 +7771,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7522,7 +7785,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7530,15 +7793,26 @@ name = "swap_or_not_shuffle" version = "0.2.0" dependencies = [ "criterion", - "eth2_hashing", "ethereum-types 0.14.1", + "ethereum_hashing", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] name = "syn" -version = "1.0.107" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", @@ -7559,7 +7833,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7652,15 +7926,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] 
[[package]] @@ -7696,7 +7970,24 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "testcontainers" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "bollard-stubs", + "futures", + "hex", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", ] [[package]] @@ -7710,22 +8001,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -7760,9 +8051,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "libc", @@ -7780,9 +8071,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -7853,22 +8144,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -7883,13 +8173,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -7902,6 +8192,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.1", + "percent-encoding", + "phf", + 
"pin-project-lite 0.2.9", + "postgres-protocol", + "postgres-types", + "socket2 0.5.2", + "tokio", + "tokio-util 0.7.8", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -7926,14 +8240,14 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -7983,15 +8297,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite 0.2.9", + "slab", "tokio", "tracing", ] @@ -8067,13 +8382,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -8109,9 +8424,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -8141,31 +8456,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "tree_hash" -version = "0.4.1" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8488e272d45adc36db8f6c99d09613f58a7cd06c7b347546c87d9a29ca11e8" dependencies = [ - "beacon_chain", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", "ethereum-types 0.14.1", - "rand 0.8.5", + "ethereum_hashing", "smallvec", - "tree_hash_derive", - "types", ] [[package]] name = "tree_hash_derive" -version = "0.4.0" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83baa26594d96889e5fef7638dfb0f41e16070301a5cf6da99b9a6a0804cec89" dependencies = [ "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8196,7 +8509,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -8316,13 +8629,12 @@ dependencies = [ "compare_fields_derive", "criterion", "derivative", - "eth2_hashing", "eth2_interop_keypairs", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "hex", "int_to_bytes", "itertools", @@ -8345,6 +8657,7 @@ dependencies = [ "serde_yaml", "slog", "smallvec", + "ssz_types", "state_processing", "superstruct 0.6.0", "swap_or_not_shuffle", @@ -8355,12 +8668,6 @@ dependencies 
= [ "tree_hash_derive", ] -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - [[package]] name = "uint" version = "0.9.5" @@ -8391,15 +8698,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -8432,6 +8739,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8461,6 +8778,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "unused_port" version = "0.1.0" +dependencies = [ + "lazy_static", + "lru_cache", + "parking_lot 0.12.1", +] [[package]] name = "url" @@ -8485,17 +8807,17 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "serde", ] [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -8513,7 +8835,7 @@ dependencies = [ "environment", "eth2", "eth2_keystore", - "eth2_serde_utils", + "ethereum_serde_utils", "exit-future", "filesystem", "futures", @@ -8618,12 +8940,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8724,7 +9045,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -8758,7 +9079,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8769,30 +9090,6 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" -dependencies = [ - 
"console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "wasm-streams" version = "0.2.3" @@ -8821,6 +9118,39 @@ dependencies = [ "web-sys", ] +[[package]] +name = "watch" +version = "0.1.0" +dependencies = [ + "axum", + "beacon_chain", + "beacon_node", + "bls", + "byteorder", + "clap", + "diesel", + "diesel_migrations", + "env_logger 0.9.3", + "eth2", + "hex", + "http_api", + "hyper", + "log", + "network", + "r2d2", + "rand 0.7.3", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "testcontainers", + "tokio", + "tokio-postgres", + "types", + "unused_port", + "url", +] + [[package]] name = "web-sys" version = "0.3.61" @@ -8888,6 +9218,8 @@ dependencies = [ "eth2_network_config", "exit-future", "futures", + "lazy_static", + "parking_lot 0.12.1", "reqwest", "serde", "serde_derive", @@ -8958,7 +9290,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.17", + "time 0.3.20", "tokio", "turn", "url", @@ -8990,22 +9322,22 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.8.0", + "aes-gcm 0.10.1", "async-trait", "bincode", "block-modes", "byteorder", "ccm", "curve25519-dalek 3.2.0", - "der-parser 8.1.0", + "der-parser 8.2.0", "elliptic-curve", "hkdf", - "hmac 0.10.1", + "hmac 0.12.1", "log", "oid-registry 0.6.1", "p256", @@ -9017,8 +9349,8 @@ dependencies = [ "rustls 0.19.1", "sec1", "serde", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha1", + "sha2 0.10.6", "signature", "subtle", "thiserror", @@ -9047,7 +9379,7 @@ dependencies = [ "tokio", "turn", "url", - "uuid 1.3.0", + "uuid 1.3.2", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -9060,7 +9392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -9068,18 +9400,15 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" dependencies = [ "byteorder", "bytes", - "derive_builder", - "displaydoc", "rand 0.8.5", "rtp", "thiserror", - "webrtc-util", ] [[package]] @@ -9144,15 +9473,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -9220,6 +9540,15 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -9238,13 +9567,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -9253,29 +9582,59 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" @@ -9285,9 +9644,15 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" @@ -9297,9 +9662,15 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" @@ -9309,9 +9680,15 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" @@ -9321,15 +9698,27 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" @@ -9339,9 +9728,15 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winreg" @@ -9424,7 +9819,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9433,16 +9828,16 @@ version = "0.14.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "base64 0.13.1", "data-encoding", - "der-parser 8.1.0", + "der-parser 8.2.0", "lazy_static", "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9485,32 +9880,31 @@ dependencies = [ [[package]] name = "yasna" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.15", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 46852645eba..bbe77d2096a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,17 +53,10 @@ members = [ "consensus/fork_choice", "consensus/proto_array", "consensus/safe_arith", - "consensus/ssz", - "consensus/ssz_derive", - "consensus/ssz_types", - "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", - "consensus/tree_hash", - "consensus/tree_hash_derive", "crypto/bls", - "crypto/eth2_hashing", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", @@ -87,20 +80,15 @@ members = [ "validator_client", "validator_client/slashing_protection", + + "watch", ] resolver = "2" [patch] [patch.crates-io] warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } -eth2_ssz = { path = "consensus/ssz" } -eth2_ssz_derive = { path = "consensus/ssz_derive" } -eth2_ssz_types = { path = "consensus/ssz_types" } -eth2_hashing = { path = "crypto/eth2_hashing" } -tree_hash = { path = "consensus/tree_hash" } -tree_hash_derive = { path = "consensus/tree_hash_derive" } -eth2_serde_utils = { path = "consensus/serde_utils" } -arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } [patch."https://github.com/ralexstokes/mev-rs"] mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } diff --git a/Dockerfile b/Dockerfile index 7a0602a2213..0d268c7e1aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.65.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse
 ARG FEATURES
diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs
index 9e5b57a2975..5755a355f31 100644
--- a/account_manager/src/validator/exit.rs
+++ b/account_manager/src/validator/exit.rs
@@ -27,7 +27,6 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password";
 pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
 pub const CONFIRMATION_PHRASE: &str = "Exit my validator";
 pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html";
-pub const PROMPT: &str = "WARNING: WITHDRAWING STAKED ETH IS NOT CURRENTLY POSSIBLE";
 
 pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new("exit")
@@ -161,7 +160,6 @@ async fn publish_voluntary_exit(
     );
     if !no_confirmation {
         eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n");
-        eprintln!("{}\n", PROMPT);
         eprintln!(
             "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.",
             WEBSITE_URL
diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml
index 521e2b89c19..95f145a557d 100644
--- a/beacon_node/Cargo.toml
+++ b/beacon_node/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "3.5.1"
+version = "4.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
 edition = "2021"
diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs
new file mode 100644
--- /dev/null
+++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1};
+use slog::{crit, debug, Logger};
+use std::collections::HashMap;
+use std::sync::Arc;
+use store::DatabaseBlock;
+use tokio::sync::RwLock;
+use types::{
+    ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
+    ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256, SignedBeaconBlock,
+    SignedBlindedBeaconBlock, Slot,
+};
+
+#[derive(PartialEq)]
+pub enum CheckEarlyAttesterCache {
+    Yes,
+    No,
+}
+
+#[derive(Debug)]
+pub enum Error {
+    PayloadReconstruction(String),
+    BlocksByRangeFailure(Box<execution_layer::Error>),
+    RequestNotFound,
+    BlockResultNotFound,
+}
+
+const BLOCKS_PER_RANGE_REQUEST: u64 = 32;
+
+// This is the same as a DatabaseBlock but the Arc allows us to avoid an unnecessary clone.
+enum LoadedBeaconBlock<E: EthSpec> {
+    Full(Arc<SignedBeaconBlock<E>>),
+    Blinded(Box<SignedBlindedBeaconBlock<E>>),
+}
+type LoadResult<E> = Result<Option<LoadedBeaconBlock<E>>, BeaconChainError>;
+type BlockResult<E> = Result<Option<Arc<SignedBeaconBlock<E>>>, BeaconChainError>;
+
+enum RequestState<E: EthSpec> {
+    UnSent(Vec<BlockParts<E>>),
+    Sent(HashMap<Hash256, Arc<BlockResult<E>>>),
+}
+
+struct BodiesByRange<E: EthSpec> {
+    start: u64,
+    count: u64,
+    state: RequestState<E>,
+}
+
+// stores the components of a block for future re-construction in a small form
+struct BlockParts<E: EthSpec> {
+    blinded_block: Box<SignedBlindedBeaconBlock<E>>,
+    header: Box<ExecutionPayloadHeader<E>>,
+    body: Option<Box<ExecutionPayloadBodyV1<E>>>,
+}
+
+impl<E: EthSpec> BlockParts<E> {
+    pub fn new(
+        blinded: Box<SignedBlindedBeaconBlock<E>>,
+        header: ExecutionPayloadHeader<E>,
+    ) -> Self {
+        Self {
+            blinded_block: blinded,
+            header: Box::new(header),
+            body: None,
+        }
+    }
+
+    pub fn root(&self) -> Hash256 {
+        self.blinded_block.canonical_root()
+    }
+
+    pub fn slot(&self) -> Slot {
+        self.blinded_block.message().slot()
+    }
+
+    pub fn block_hash(&self) -> ExecutionBlockHash {
+        self.header.block_hash()
+    }
+}
+
+fn reconstruct_default_header_block<E: EthSpec>(
+    blinded_block: Box<SignedBlindedBeaconBlock<E>>,
+    header_from_block: ExecutionPayloadHeader<E>,
+    spec: &ChainSpec,
+) -> BlockResult<E> {
+    let fork = blinded_block
+        .fork_name(spec)
+        .map_err(BeaconChainError::InconsistentFork)?;
+
+    let payload: ExecutionPayload<E> = match fork {
+        ForkName::Merge => ExecutionPayloadMerge::default().into(),
+        ForkName::Capella => ExecutionPayloadCapella::default().into(),
+        ForkName::Base | ForkName::Altair => {
+            return Err(Error::PayloadReconstruction(format!(
+                "Block with fork variant {} has execution payload",
+                fork
+            ))
+            .into())
+        }
+    };
+
+    let header_from_payload = ExecutionPayloadHeader::from(payload.to_ref());
+    if header_from_payload == header_from_block {
+        blinded_block
+            .try_into_full_block(Some(payload))
+            .ok_or(BeaconChainError::AddPayloadLogicError)
+            .map(Arc::new)
+            .map(Some)
+    } else {
+        Err(BeaconChainError::InconsistentPayloadReconstructed {
+            slot: blinded_block.slot(),
+            exec_block_hash: header_from_block.block_hash(),
+            canonical_transactions_root: header_from_block.transactions_root(),
+            reconstructed_transactions_root: header_from_payload.transactions_root(),
+        })
+    }
+}
+
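+// Rebuilds full blocks from `BlockParts` whose payload bodies have been
+// fetched: each body is converted back into an `ExecutionPayload`, the header
+// derived from that payload is checked against the header committed to in the
+// blinded block, and the reconstructed block (or the error) is stored in
+// `block_map` under the block root.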
+fn reconstruct_blocks<E: EthSpec>(
+    block_map: &mut HashMap<Hash256, Arc<BlockResult<E>>>,
+    block_parts_with_bodies: HashMap<Hash256, BlockParts<E>>,
+    log: &Logger,
+) {
+    for (root, block_parts) in block_parts_with_bodies {
+        if let Some(payload_body) = block_parts.body {
+            match payload_body.to_payload(block_parts.header.as_ref().clone()) {
+                Ok(payload) => {
+                    let header_from_payload = ExecutionPayloadHeader::from(payload.to_ref());
+                    if header_from_payload == *block_parts.header {
+                        block_map.insert(
+                            root,
+                            Arc::new(
+                                block_parts
+                                    .blinded_block
+                                    .try_into_full_block(Some(payload))
+                                    .ok_or(BeaconChainError::AddPayloadLogicError)
+                                    .map(Arc::new)
+                                    .map(Some),
+                            ),
+                        );
+                    } else {
+                        let error = BeaconChainError::InconsistentPayloadReconstructed {
+                            slot: block_parts.blinded_block.slot(),
+                            exec_block_hash: block_parts.header.block_hash(),
+                            canonical_transactions_root: block_parts.header.transactions_root(),
+                            reconstructed_transactions_root: header_from_payload
+                                .transactions_root(),
+                        };
+                        debug!(log, "Failed to reconstruct block"; "root" => ?root, "error" => ?error);
+                        block_map.insert(root, Arc::new(Err(error)));
+                    }
+                }
+                Err(string) => {
+                    block_map.insert(
+                        root,
+                        Arc::new(Err(Error::PayloadReconstruction(string).into())),
+                    );
+                }
+            }
+        } else {
+            block_map.insert(
+                root,
+                Arc::new(Err(BeaconChainError::BlockHashMissingFromExecutionLayer(
+                    block_parts.block_hash(),
+                ))),
+            );
+        }
+    }
+}
+
+impl<E: EthSpec> BodiesByRange<E> {
+    pub fn new(maybe_block_parts: Option<BlockParts<E>>) -> Self {
+        if let Some(block_parts) = maybe_block_parts {
+            Self {
+                start: block_parts.header.block_number(),
+                count: 1,
+                state: RequestState::UnSent(vec![block_parts]),
+            }
+        } else {
+            Self {
+                start: 0,
+                count: 0,
+                state: RequestState::UnSent(vec![]),
+            }
+        }
+    }
+
+    pub fn is_unsent(&self) -> bool {
+        matches!(self.state, RequestState::UnSent(_))
+    }
+
+    pub fn push_block_parts(&mut self, block_parts: BlockParts<E>) -> Result<(), BlockParts<E>> {
+        if self.count == BLOCKS_PER_RANGE_REQUEST {
+            return Err(block_parts);
+        }
+
+        match &mut self.state {
+            RequestState::Sent(_) => Err(block_parts),
+            RequestState::UnSent(blocks_parts_vec) => {
+                let block_number = block_parts.header.block_number();
+                if self.count == 0 {
+                    self.start = block_number;
+                    self.count = 1;
+                    blocks_parts_vec.push(block_parts);
+                    Ok(())
+                } else {
+                    // need to figure out if this block fits in the request
+                    if block_number < self.start
+                        || self.start + BLOCKS_PER_RANGE_REQUEST <= block_number
+                    {
+                        return Err(block_parts);
+                    }
+
+                    blocks_parts_vec.push(block_parts);
+                    if self.start + self.count <= block_number {
+                        self.count = block_number - self.start + 1;
+                    }
+
+                    Ok(())
+                }
+            }
+        }
+    }
+
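+    // A concrete example of the window rules above: for a request that
+    // started at block number 1000, block numbers 1000..=1031 fit (at most
+    // BLOCKS_PER_RANGE_REQUEST = 32 consecutive numbers); 999 or 1032 is
+    // rejected via `Err(block_parts)` so the caller can start a new request.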
let block_result = + Arc::new(Err(Error::BlocksByRangeFailure(Box::new(e)).into())); + debug!(log, "Payload bodies by range failure"; "error" => ?block_result); + for block_parts in block_parts_vec { + block_map.insert(block_parts.root(), block_result.clone()); + } + } + } + self.state = RequestState::Sent(block_map); + } + } + + pub async fn get_block_result( + &mut self, + root: &Hash256, + execution_layer: &ExecutionLayer, + log: &Logger, + ) -> Option>> { + self.execute(execution_layer, log).await; + if let RequestState::Sent(map) = &self.state { + return map.get(root).cloned(); + } + // Shouldn't reach this point + None + } +} + +#[derive(Clone)] +enum EngineRequest { + ByRange(Arc>>), + // When we already have the data or there's an error + NoRequest(Arc>>>>), +} + +impl EngineRequest { + pub fn new_by_range() -> Self { + Self::ByRange(Arc::new(RwLock::new(BodiesByRange::new(None)))) + } + pub fn new_no_request() -> Self { + Self::NoRequest(Arc::new(RwLock::new(HashMap::new()))) + } + + pub async fn is_unsent(&self) -> bool { + match self { + Self::ByRange(bodies_by_range) => bodies_by_range.read().await.is_unsent(), + Self::NoRequest(_) => false, + } + } + + pub async fn push_block_parts(&mut self, block_parts: BlockParts, log: &Logger) { + match self { + Self::ByRange(bodies_by_range) => { + let mut request = bodies_by_range.write().await; + + if let Err(block_parts) = request.push_block_parts(block_parts) { + drop(request); + let new_by_range = BodiesByRange::new(Some(block_parts)); + *self = Self::ByRange(Arc::new(RwLock::new(new_by_range))); + } + } + Self::NoRequest(_) => { + // this should _never_ happen + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "push_block_parts called on NoRequest Variant", + ); + } + } + } + + pub async fn push_block_result( + &mut self, + root: Hash256, + block_result: BlockResult, + log: &Logger, + ) { + // this function will only fail if something is seriously wrong + match self { + Self::ByRange(_) => { + // this should _never_ happen + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "push_block_result called on ByRange", + ); + } + Self::NoRequest(results) => { + results.write().await.insert(root, Arc::new(block_result)); + } + } + } + + pub async fn get_block_result( + &self, + root: &Hash256, + execution_layer: &ExecutionLayer, + log: &Logger, + ) -> Arc> { + match self { + Self::ByRange(by_range) => { + by_range + .write() + .await + .get_block_result(root, execution_layer, log) + .await + } + Self::NoRequest(map) => map.read().await.get(root).cloned(), + } + .unwrap_or_else(|| { + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "block_result not found in request", + "root" => ?root, + ); + Arc::new(Err(Error::BlockResultNotFound.into())) + }) + } +} + +pub struct BeaconBlockStreamer { + execution_layer: ExecutionLayer, + check_early_attester_cache: CheckEarlyAttesterCache, + beacon_chain: Arc>, +} + +impl BeaconBlockStreamer { + pub fn new( + beacon_chain: &Arc>, + check_early_attester_cache: CheckEarlyAttesterCache, + ) -> Result { + let execution_layer = beacon_chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing)? 
+ .clone(); + + Ok(Self { + execution_layer, + check_early_attester_cache, + beacon_chain: beacon_chain.clone(), + }) + } + + fn check_early_attester_cache( + &self, + root: Hash256, + ) -> Option>> { + if self.check_early_attester_cache == CheckEarlyAttesterCache::Yes { + self.beacon_chain.early_attester_cache.get_block(root) + } else { + None + } + } + + fn load_payloads(&self, block_roots: Vec) -> Vec<(Hash256, LoadResult)> { + let mut db_blocks = Vec::new(); + + for root in block_roots { + if let Some(cached_block) = self + .check_early_attester_cache(root) + .map(LoadedBeaconBlock::Full) + { + db_blocks.push((root, Ok(Some(cached_block)))); + continue; + } + + match self.beacon_chain.store.try_get_full_block(&root) { + Err(e) => db_blocks.push((root, Err(e.into()))), + Ok(opt_block) => db_blocks.push(( + root, + Ok(opt_block.map(|db_block| match db_block { + DatabaseBlock::Full(block) => LoadedBeaconBlock::Full(Arc::new(block)), + DatabaseBlock::Blinded(block) => { + LoadedBeaconBlock::Blinded(Box::new(block)) + } + })), + )), + } + } + + db_blocks + } + + /// Pre-process the loaded blocks into execution engine requests. + /// + /// The purpose of this function is to separate the blocks into 2 categories: + /// 1) no_request - when we already have the full block or there's an error + /// 2) blocks_by_range - used for blinded blocks + /// + /// The function returns a vector of block roots in the same order as requested + /// along with the engine request that each root corresponds to. + async fn get_requests( + &self, + payloads: Vec<(Hash256, LoadResult)>, + ) -> Vec<(Hash256, EngineRequest)> { + let mut ordered_block_roots = Vec::new(); + let mut requests = HashMap::new(); + + // we sort the by range blocks by slot before adding them to the + // request as it should *better* optimize the number of blocks that + // can fit in the same request + let mut by_range_blocks: Vec> = vec![]; + let mut no_request = EngineRequest::new_no_request(); + + for (root, load_result) in payloads { + // preserve the order of the requested blocks + ordered_block_roots.push(root); + + let block_result = match load_result { + Err(e) => Err(e), + Ok(None) => Ok(None), + Ok(Some(LoadedBeaconBlock::Full(full_block))) => Ok(Some(full_block)), + Ok(Some(LoadedBeaconBlock::Blinded(blinded_block))) => { + match blinded_block + .message() + .execution_payload() + .map(|payload| payload.to_execution_payload_header()) + { + Ok(header) => { + if header.block_hash() == ExecutionBlockHash::zero() { + reconstruct_default_header_block( + blinded_block, + header, + &self.beacon_chain.spec, + ) + } else { + // Add the block to the set requiring a by-range request. + let block_parts = BlockParts::new(blinded_block, header); + by_range_blocks.push(block_parts); + continue; + } + } + Err(e) => Err(BeaconChainError::BeaconStateError(e)), + } + } + }; + + no_request + .push_block_result(root, block_result, &self.beacon_chain.log) + .await; + requests.insert(root, no_request.clone()); + } + + // Now deal with the by_range requests. 
Sort them in order of increasing slot + let mut by_range = EngineRequest::::new_by_range(); + by_range_blocks.sort_by_key(|block_parts| block_parts.slot()); + for block_parts in by_range_blocks { + let root = block_parts.root(); + by_range + .push_block_parts(block_parts, &self.beacon_chain.log) + .await; + requests.insert(root, by_range.clone()); + } + + let mut result = vec![]; + for root in ordered_block_roots { + if let Some(request) = requests.get(&root) { + result.push((root, request.clone())) + } else { + crit!( + self.beacon_chain.log, + "Please notify the devs"; + "beacon_block_streamer" => "request not found", + "root" => ?root, + ); + no_request + .push_block_result( + root, + Err(Error::RequestNotFound.into()), + &self.beacon_chain.log, + ) + .await; + result.push((root, no_request.clone())); + } + } + + result + } + + // used when the execution engine doesn't support the payload bodies methods + async fn stream_blocks_fallback( + &self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + debug!( + self.beacon_chain.log, + "Using slower fallback method of eth_getBlockByHash()" + ); + for root in block_roots { + let cached_block = self.check_early_attester_cache(root); + let block_result = if cached_block.is_some() { + Ok(cached_block) + } else { + self.beacon_chain + .get_block(&root) + .await + .map(|opt_block| opt_block.map(Arc::new)) + }; + + if sender.send((root, Arc::new(block_result))).is_err() { + break; + } + } + } + + async fn stream_blocks( + &self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + let n_roots = block_roots.len(); + let mut n_success = 0usize; + let mut n_sent = 0usize; + let mut engine_requests = 0usize; + + let payloads = self.load_payloads(block_roots); + let requests = self.get_requests(payloads).await; + + for (root, request) in requests { + if request.is_unsent().await { + engine_requests += 1; + } + + let result = request + .get_block_result(&root, &self.execution_layer, &self.beacon_chain.log) + .await; + + let successful = result + .as_ref() + .as_ref() + .map(|opt| opt.is_some()) + .unwrap_or(false); + + if sender.send((root, result)).is_err() { + break; + } else { + n_sent += 1; + if successful { + n_success += 1; + } + } + } + + debug!( + self.beacon_chain.log, + "BeaconBlockStreamer finished"; + "requested blocks" => n_roots, + "sent" => n_sent, + "succeeded" => n_success, + "failed" => (n_sent - n_success), + "engine requests" => engine_requests, + ); + } + + pub async fn stream( + self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + match self + .execution_layer + .get_engine_capabilities(None) + .await + .map_err(Box::new) + .map_err(BeaconChainError::EngineGetCapabilititesFailed) + { + Ok(engine_capabilities) => { + if engine_capabilities.get_payload_bodies_by_range_v1 { + self.stream_blocks(block_roots, sender).await; + } else { + // use the fallback method + self.stream_blocks_fallback(block_roots, sender).await; + } + } + Err(e) => { + send_errors(block_roots, sender, e).await; + } + } + } + + pub fn launch_stream( + self, + block_roots: Vec, + executor: &TaskExecutor, + ) -> impl Stream>)> { + let (block_tx, block_rx) = mpsc::unbounded_channel(); + debug!( + self.beacon_chain.log, + "Launching a BeaconBlockStreamer"; + "blocks" => block_roots.len(), + ); + executor.spawn(self.stream(block_roots, block_tx), "get_blocks_sender"); + UnboundedReceiverStream::new(block_rx) + } +} + +async fn send_errors( + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + 
beacon_chain_error: BeaconChainError, +) { + let result = Arc::new(Err(beacon_chain_error)); + for root in block_roots { + if sender.send((root, result.clone())).is_err() { + break; + } + } +} + +impl From for BeaconChainError { + fn from(value: Error) -> Self { + BeaconChainError::BlockStreamerError(value) + } +} + +#[cfg(test)] +mod tests { + use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; + use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; + use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES}; + use execution_layer::EngineCapabilities; + use lazy_static::lazy_static; + use std::time::Duration; + use tokio::sync::mpsc; + use types::{ChainSpec, Epoch, EthSpec, Hash256, Keypair, MinimalEthSpec, Slot}; + + const VALIDATOR_COUNT: usize = 48; + lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + } + + fn get_harness( + validator_count: usize, + spec: ChainSpec, + ) -> BeaconChainHarness> { + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec) + .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(logging::test_logger()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness + } + + #[tokio::test] + async fn check_all_blocks_from_altair_to_capella() { + let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; + let num_epochs = 8; + let bellatrix_fork_epoch = 2usize; + let capella_fork_epoch = 4usize; + let num_blocks_produced = num_epochs * slots_per_epoch; + + let mut spec = test_spec::(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); + spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + + let harness = get_harness(VALIDATOR_COUNT, spec); + // go to bellatrix fork + harness + .extend_slots(bellatrix_fork_epoch * slots_per_epoch) + .await; + // extend half an epoch + harness.extend_slots(slots_per_epoch / 2).await; + // trigger merge + harness + .execution_block_generator() + .move_to_terminal_block() + .expect("should move to terminal block"); + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + // finish out merge epoch + harness.extend_slots(slots_per_epoch / 2).await; + // finish rest of epochs + harness + .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) + .await; + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + + assert_eq!( + state.slot(), + Slot::new(num_blocks_produced as u64), + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let block_roots: Vec = harness + .chain + .forwards_iter_block_roots(Slot::new(0)) + .expect("should get iter") + .map(Result::unwrap) + .map(|(root, _)| root) + .collect(); + + let mut expected_blocks = vec![]; + // 
get all blocks the old fashioned way + for root in &block_roots { + let block = harness + .chain + .get_block(root) + .await + .expect("should get block") + .expect("block should exist"); + expected_blocks.push(block); + } + + for epoch in 0..num_epochs { + let start = epoch * slots_per_epoch; + let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; + epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); + let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + .expect("should create streamer"); + let (block_tx, mut block_rx) = mpsc::unbounded_channel(); + streamer.stream(epoch_roots.clone(), block_tx).await; + + for (i, expected_root) in epoch_roots.into_iter().enumerate() { + let (found_root, found_block_result) = + block_rx.recv().await.expect("should get block"); + + assert_eq!( + found_root, expected_root, + "expected block root should match" + ); + match found_block_result.as_ref() { + Ok(maybe_block) => { + let found_block = maybe_block.clone().expect("should have a block"); + let expected_block = expected_blocks + .get(start + i) + .expect("should get expected block"); + assert_eq!( + found_block.as_ref(), + expected_block, + "expected block should match found block" + ); + } + Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e), + } + } + } + } + + #[tokio::test] + async fn check_fallback_altair_to_capella() { + let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; + let num_epochs = 8; + let bellatrix_fork_epoch = 2usize; + let capella_fork_epoch = 4usize; + let num_blocks_produced = num_epochs * slots_per_epoch; + + let mut spec = test_spec::(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); + spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + + let harness = get_harness(VALIDATOR_COUNT, spec); + + // modify execution engine so it doesn't support engine_payloadBodiesBy* methods + let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .set_engine_capabilities(EngineCapabilities { + get_payload_bodies_by_hash_v1: false, + get_payload_bodies_by_range_v1: false, + ..DEFAULT_ENGINE_CAPABILITIES + }); + // refresh capabilities cache + harness + .chain + .execution_layer + .as_ref() + .unwrap() + .get_engine_capabilities(Some(Duration::ZERO)) + .await + .unwrap(); + + // go to bellatrix fork + harness + .extend_slots(bellatrix_fork_epoch * slots_per_epoch) + .await; + // extend half an epoch + harness.extend_slots(slots_per_epoch / 2).await; + // trigger merge + harness + .execution_block_generator() + .move_to_terminal_block() + .expect("should move to terminal block"); + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + // finish out merge epoch + harness.extend_slots(slots_per_epoch / 2).await; + // finish rest of epochs + harness + .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) + .await; + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + + assert_eq!( + state.slot(), + Slot::new(num_blocks_produced as u64), + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(), + "head should be at the 
expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let block_roots: Vec = harness + .chain + .forwards_iter_block_roots(Slot::new(0)) + .expect("should get iter") + .map(Result::unwrap) + .map(|(root, _)| root) + .collect(); + + let mut expected_blocks = vec![]; + // get all blocks the old fashioned way + for root in &block_roots { + let block = harness + .chain + .get_block(root) + .await + .expect("should get block") + .expect("block should exist"); + expected_blocks.push(block); + } + + for epoch in 0..num_epochs { + let start = epoch * slots_per_epoch; + let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; + epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); + let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + .expect("should create streamer"); + let (block_tx, mut block_rx) = mpsc::unbounded_channel(); + streamer.stream(epoch_roots.clone(), block_tx).await; + + for (i, expected_root) in epoch_roots.into_iter().enumerate() { + let (found_root, found_block_result) = + block_rx.recv().await.expect("should get block"); + + assert_eq!( + found_root, expected_root, + "expected block root should match" + ); + match found_block_result.as_ref() { + Ok(maybe_block) => { + let found_block = maybe_block.clone().expect("should have a block"); + let expected_block = expected_blocks + .get(start + i) + .expect("should get expected block"); + assert_eq!( + found_block.as_ref(), + expected_block, + "expected block should match found block" + ); + } + Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e), + } + } + } + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ba8f24cbe7..ca0c5ce15b7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,6 +4,7 @@ use crate::attestation_verification::{ VerifiedUnaggregatedAttestation, }; use crate::attester_cache::{AttesterCache, AttesterCacheKey}; +use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; @@ -72,7 +73,7 @@ use itertools::process_results; use itertools::Itertools; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; -use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; +use proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -102,9 +103,9 @@ use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; +use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; -use types::consts::merge::INTERVALS_PER_SLOT; use types::*; pub type ForkChoiceError = fork_choice::Error; @@ -126,12 +127,6 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const 
ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); -/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. -fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { - // Allow at least half of the attestation deadline for the block to propagate. - Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 -} - // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -197,6 +192,9 @@ pub enum ProduceBlockVerification { pub struct PrePayloadAttributes { pub proposer_index: u64, pub prev_randao: Hash256, + /// The parent block number is not part of the payload attributes sent to the EL, but *is* + /// sent to builders via SSE. + pub parent_block_number: u64, } /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already @@ -354,7 +352,7 @@ pub struct BeaconChain { /// in recent epochs. pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. - pub(crate) observed_block_producers: RwLock>, + pub observed_block_producers: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -427,6 +425,46 @@ pub struct BeaconChain { type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { + /// Checks if a block is finalized. + /// The finalization check is done with the block slot. The block root is used to verify that + /// the finalized slot is in the canonical chain. + pub fn is_finalized_block( + &self, + block_root: &Hash256, + block_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .block_root_at_slot(block_slot, WhenSlotSkipped::None)? + .map_or(false, |canonical_root| block_root == &canonical_root); + Ok(block_slot <= finalized_slot && is_canonical) + } + + /// Checks if a state is finalized. + /// The finalization check is done with the slot. The state root is used to verify that + /// the finalized state is in the canonical chain. + pub fn is_finalized_state( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .state_root_at_slot(state_slot)? + .map_or(false, |canonical_root| state_root == &canonical_root); + Ok(state_slot <= finalized_slot && is_canonical) + } + /// Persists the head tracker and fork choice. /// /// We do it atomically even though no guarantees need to be made about blocks from @@ -474,7 +512,6 @@ impl BeaconChain { pub fn load_fork_choice( store: BeaconStore, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result>, Error> { @@ -491,7 +528,6 @@ impl BeaconChain { persisted_fork_choice.fork_choice, reset_payload_statuses, fc_store, - count_unrealized_full, spec, log, )?)) @@ -938,14 +974,42 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. 
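
The two finalization checks above share the same shape: map the finalized checkpoint's epoch to its start slot, then confirm the queried root is actually canonical at its slot. Below is a minimal standalone sketch of that logic, with plain `u64`s standing in for Lighthouse's `Slot`/`Epoch` newtypes and a hypothetical `canonical_root_at_slot` lookup in place of `block_root_at_slot`/`state_root_at_slot`:

```rust
const SLOTS_PER_EPOCH: u64 = 32;

fn is_finalized(
    queried_root: [u8; 32],
    queried_slot: u64,
    finalized_epoch: u64,
    canonical_root_at_slot: impl Fn(u64) -> Option<[u8; 32]>,
) -> bool {
    // Everything at or before the first slot of the finalized epoch is final.
    let finalized_slot = finalized_epoch * SLOTS_PER_EPOCH;
    // The slot check alone is insufficient: a block on a pruned fork can have
    // a slot in the finalized range, so the root must also match the canonical chain.
    let is_canonical = canonical_root_at_slot(queried_slot)
        .map_or(false, |canonical| canonical == queried_root);
    queried_slot <= finalized_slot && is_canonical
}

fn main() {
    let root = [0xab; 32];
    let lookup = |slot: u64| (slot == 64).then_some([0xab; 32]);
    assert!(is_finalized(root, 64, 3, lookup)); // 64 <= 3 * 32 = 96, and canonical
    assert!(!is_finalized(root, 100, 3, lookup)); // past the finalized slot
    println!("finalization checks behave as expected");
}
```
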
-    pub async fn get_block_checking_early_attester_cache(
-        &self,
-        block_root: &Hash256,
-    ) -> Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, Error> {
-        if let Some(block) = self.early_attester_cache.get_block(*block_root) {
-            return Ok(Some(block));
-        }
-        Ok(self.get_block(block_root).await?.map(Arc::new))
+    pub fn get_blocks_checking_early_attester_cache(
+        self: &Arc<Self>,
+        block_roots: Vec<Hash256>,
+        executor: &TaskExecutor,
+    ) -> Result<
+        impl Stream<
+            Item = (
+                Hash256,
+                Arc<Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, Error>>,
+            ),
+        >,
+        Error,
+    > {
+        Ok(
+            BeaconBlockStreamer::<T>::new(self, CheckEarlyAttesterCache::Yes)?
+                .launch_stream(block_roots, executor),
+        )
+    }
+
+    pub fn get_blocks(
+        self: &Arc<Self>,
+        block_roots: Vec<Hash256>,
+        executor: &TaskExecutor,
+    ) -> Result<
+        impl Stream<
+            Item = (
+                Hash256,
+                Arc<Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, Error>>,
+            ),
+        >,
+        Error,
+    > {
+        Ok(
+            BeaconBlockStreamer::<T>::new(self, CheckEarlyAttesterCache::No)?
+                .launch_stream(block_roots, executor),
+        )
+    }

     /// Returns the block at the given root, if any.
@@ -979,7 +1043,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .execution_layer
             .as_ref()
             .ok_or(Error::ExecutionLayerMissing)?
-            .get_payload_by_block_hash(exec_block_hash, fork)
+            .get_payload_for_header(&execution_payload_header, fork)
             .await
             .map_err(|e| {
                 Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e))
@@ -1867,7 +1931,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 self.slot()?,
                 verified.indexed_attestation(),
                 AttestationFromBlock::False,
-                &self.spec,
             )
             .map_err(Into::into)
     }
@@ -2143,12 +2206,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         exit: SignedVoluntaryExit,
     ) -> Result<ObservationOutcome<SignedVoluntaryExit, T::EthSpec>, Error> {
-        // NOTE: this could be more efficient if it avoided cloning the head state
-        let wall_clock_state = self.wall_clock_state()?;
+        let head_snapshot = self.head().snapshot;
+        let head_state = &head_snapshot.beacon_state;
+        let wall_clock_epoch = self.epoch()?;
+
         Ok(self
             .observed_voluntary_exits
             .lock()
-            .verify_and_observe(exit, &wall_clock_state, &self.spec)
+            .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec)
             .map(|exit| {
                 // this method is called for both API and gossip exits, so this covers all exit events
                 if let Some(event_handler) = self.event_handler.as_ref() {
@@ -2823,7 +2888,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
             let block_delay = self
                 .slot_clock
-                .seconds_from_current_slot_start(self.spec.seconds_per_slot)
+                .seconds_from_current_slot_start()
                 .ok_or(Error::UnableToComputeTimeAtSlot)?;

             fork_choice
@@ -2835,7 +2900,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     &state,
                     payload_verification_status,
                     &self.spec,
-                    count_unrealized.and(self.config.count_unrealized.into()),
+                    count_unrealized,
                 )
                 .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
@@ -2954,7 +3019,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             ResetPayloadStatuses::always_reset_conditionally(
                 self.config.always_reset_payload_statuses,
             ),
-            self.config.count_unrealized_full,
             &self.store,
             &self.spec,
             &self.log,
@@ -3555,7 +3619,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let (state, state_root_opt) = self
             .task_executor
             .spawn_blocking_handle(
-                move || chain.load_state_for_block_production::<Payload>(slot),
+                move || chain.load_state_for_block_production(slot),
                 "produce_partial_beacon_block",
             )
             .ok_or(BlockProductionError::ShuttingDown)?
@@ -3578,7 +3642,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Load a beacon state from the database for block production. This is a long-running process
     /// that should not be performed in an `async` context.
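
Since `get_blocks` now hands back a `Stream` rather than a single future, callers drain it asynchronously. A rough sketch of the consumption pattern, assuming the `tokio` and `tokio-stream` crates and substituting `String` for the real `Arc<Result<Option<Arc<SignedBeaconBlock<_>>>, _>>` item type:

```rust
use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt};

#[tokio::main]
async fn main() {
    // The channel pair mirrors the one created in `launch_stream`.
    let (block_tx, block_rx) = tokio::sync::mpsc::unbounded_channel();

    // A producer task stands in for the spawned `BeaconBlockStreamer::stream`.
    tokio::spawn(async move {
        for i in 0..3u64 {
            // Results are sent in the same order the roots were requested.
            if block_tx.send((i, format!("block {i}"))).is_err() {
                break; // receiver dropped, stop streaming
            }
        }
    });

    let mut blocks = UnboundedReceiverStream::new(block_rx);
    while let Some((root, block)) = blocks.next().await {
        println!("received {block} for root {root}");
    }
}
```
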
- fn load_state_for_block_production>( + fn load_state_for_block_production( self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { @@ -3677,7 +3741,7 @@ impl BeaconChain { let slot_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .or_else(|| { warn!( self.log, @@ -3692,7 +3756,7 @@ impl BeaconChain { // 1. It seems we have time to propagate and still receive the proposer boost. // 2. The current head block was seen late. // 3. The `get_proposer_head` conditions from fork choice pass. - let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); if !proposing_on_time { debug!( self.log, @@ -3722,6 +3786,7 @@ impl BeaconChain { slot, canonical_head, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| match e { @@ -3866,16 +3931,21 @@ impl BeaconChain { proposer as u64 }; - // Get the `prev_randao` value. - let prev_randao = if proposer_head == parent_block_root { - cached_head.parent_random() + // Get the `prev_randao` and parent block number. + let head_block_number = cached_head.head_block_number()?; + let (prev_randao, parent_block_number) = if proposer_head == parent_block_root { + ( + cached_head.parent_random()?, + head_block_number.saturating_sub(1), + ) } else { - cached_head.head_random() - }?; + (cached_head.head_random()?, head_block_number) + }; Ok(Some(PrePayloadAttributes { proposer_index, prev_randao, + parent_block_number, })) } @@ -3995,6 +4065,7 @@ impl BeaconChain { .get_preliminary_proposer_head( head_block_root, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; @@ -4005,7 +4076,7 @@ impl BeaconChain { let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; - // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // If a re-orging proposal isn't made by the `re_org_cutoff` then we give up // and allow the fork choice update for the canonical head through so that we may attest // correctly. 
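
The timing guard described in the comment above is simple clock arithmetic: a re-orging proposal is abandoned once the slot is more than `re_org_cutoff` old. A standalone sketch, with `Duration`s standing in for the slot-clock queries:

```rust
use std::time::Duration;

/// Returns true if it is still early enough in the slot to attempt a
/// re-orging proposal (mirrors the `slot_delay < re_org_cutoff` checks above).
fn proposing_on_time(now: Duration, slot_start: Duration, re_org_cutoff: Duration) -> bool {
    // As in the fork-choice-slot branch above, a clock reading before the
    // slot start saturates to a zero delay rather than underflowing.
    let slot_delay = now.saturating_sub(slot_start);
    slot_delay < re_org_cutoff
}

fn main() {
    let slot_start = Duration::from_secs(120);
    let cutoff = Duration::from_secs(1); // 1/12th of a 12-second mainnet slot

    // 500ms into the slot: still on time.
    assert!(proposing_on_time(Duration::from_millis(120_500), slot_start, cutoff));
    // 2s into the slot: too late, propose on the canonical head instead.
    assert!(!proposing_on_time(Duration::from_secs(122), slot_start, cutoff));
}
```
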
let current_slot_ok = if head_slot == fork_choice_slot { @@ -4016,7 +4087,7 @@ impl BeaconChain { .and_then(|slot_start| { let now = self.slot_clock.now_duration()?; let slot_delay = now.saturating_sub(slot_start); - Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot)) }) .unwrap_or(false) } else { @@ -4865,6 +4936,7 @@ impl BeaconChain { proposal_slot: prepare_slot, proposer_index: proposer, parent_block_root: head_root, + parent_block_number: pre_payload_attributes.parent_block_number, parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), payload_attributes: payload_attributes.into(), }, @@ -5088,7 +5160,7 @@ impl BeaconChain { latest_valid_hash, ref validation_error, } => { - debug!( + warn!( self.log, "Invalid execution payload"; "validation_error" => ?validation_error, @@ -5097,11 +5169,6 @@ impl BeaconChain { "head_block_root" => ?head_block_root, "method" => "fcU", ); - warn!( - self.log, - "Fork choice update invalidated payload"; - "status" => ?status - ); match latest_valid_hash { // The `latest_valid_hash` is set to `None` when the EE @@ -5147,7 +5214,7 @@ impl BeaconChain { PayloadStatus::InvalidBlockHash { ref validation_error, } => { - debug!( + warn!( self.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, @@ -5155,11 +5222,6 @@ impl BeaconChain { "head_block_root" => ?head_block_root, "method" => "fcU", ); - warn!( - self.log, - "Fork choice update invalidated payload"; - "status" => ?status - ); // The execution engine has stated that the head block is invalid, however it // hasn't returned a latest valid ancestor. // diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index b17613da0d3..71160fcb638 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -20,6 +20,14 @@ use types::{ Hash256, Slot, }; +/// Ensure this justified checkpoint has an epoch of 0 so that it is never +/// greater than the justified checkpoint and enshrined as the actual justified +/// checkpoint. 
+const JUNK_BEST_JUSTIFIED_CHECKPOINT: Checkpoint = Checkpoint { + epoch: Epoch::new(0), + root: Hash256::repeat_byte(0), +}; + #[derive(Debug)] pub enum Error { UnableToReadSlot, @@ -144,7 +152,6 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< finalized_checkpoint: Checkpoint, justified_checkpoint: Checkpoint, justified_balances: JustifiedBalances, - best_justified_checkpoint: Checkpoint, unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, @@ -194,7 +201,6 @@ where justified_checkpoint, justified_balances, finalized_checkpoint, - best_justified_checkpoint: justified_checkpoint, unrealized_justified_checkpoint: justified_checkpoint, unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), @@ -212,7 +218,7 @@ where finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.effective_balances.clone(), - best_justified_checkpoint: self.best_justified_checkpoint, + best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, @@ -234,7 +240,6 @@ where finalized_checkpoint: persisted.finalized_checkpoint, justified_checkpoint: persisted.justified_checkpoint, justified_balances, - best_justified_checkpoint: persisted.best_justified_checkpoint, unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, @@ -277,10 +282,6 @@ where &self.justified_balances } - fn best_justified_checkpoint(&self) -> &Checkpoint { - &self.best_justified_checkpoint - } - fn finalized_checkpoint(&self) -> &Checkpoint { &self.finalized_checkpoint } @@ -333,10 +334,6 @@ where Ok(()) } - fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) { - self.best_justified_checkpoint = checkpoint - } - fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) { self.unrealized_justified_checkpoint = checkpoint; } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7d5d350108d..5102381a1a1 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -280,10 +280,10 @@ pub enum BlockError { /// /// ## Peer scoring /// - /// TODO(merge): reconsider how we score peers for this. - /// - /// The peer sent us an invalid block, but I'm not really sure how to score this in an - /// "optimistic" sync world. + /// The peer sent us an invalid block, we must penalise harshly. + /// If it's actually our fault (e.g. our execution node database is corrupt) we have bigger + /// problems to worry about than losing peers, and we're doing the network a favour by + /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, } @@ -1468,7 +1468,6 @@ impl ExecutionPendingBlock { current_slot, indexed_attestation, AttestationFromBlock::True, - &chain.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. 
The diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 0bff5aa075f..56006b4d622 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -18,11 +18,11 @@ use crate::{ }; use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use proto_array::ReOrgThreshold; +use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -175,6 +175,15 @@ where self } + /// Sets the proposer re-org disallowed offsets list. + pub fn proposer_re_org_disallowed_offsets( + mut self, + disallowed_offsets: DisallowedReOrgOffsets, + ) -> Self { + self.chain_config.re_org_disallowed_offsets = disallowed_offsets; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. @@ -265,7 +274,6 @@ where ResetPayloadStatuses::always_reset_conditionally( self.chain_config.always_reset_payload_statuses, ), - self.chain_config.count_unrealized_full, &self.spec, log, ) @@ -384,7 +392,6 @@ where &genesis.beacon_block, &genesis.beacon_state, current_slot, - self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -503,7 +510,6 @@ where &snapshot.beacon_block, &snapshot.beacon_state, current_slot, - self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -681,8 +687,7 @@ where store.clone(), Some(current_slot), &self.spec, - self.chain_config.count_unrealized.into(), - self.chain_config.count_unrealized_full, + CountUnrealized::True, )?; } @@ -765,6 +770,7 @@ where let genesis_time = head_snapshot.beacon_state.genesis_time(); let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let shuffling_cache_size = self.chain_config.shuffling_cache_size; let beacon_chain = BeaconChain { spec: self.spec, @@ -818,7 +824,7 @@ where DEFAULT_SNAPSHOT_CACHE_SIZE, head_for_snapshot_cache, )), - shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), + shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(shuffling_cache_size)), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), @@ -993,7 +999,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { mod test { use super::*; use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; - use eth2_hashing::hash; + use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 19eddf60263..0e1c8a5305d 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -45,8 +45,7 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - CountUnrealizedFull, ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, - ResetPayloadStatuses, + 
ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -167,6 +166,17 @@ impl CachedHead { .map(|payload| payload.prev_randao()) } + /// Returns the execution block number of the block at the head of the chain. + /// + /// Returns an error if the chain is prior to Bellatrix. + pub fn head_block_number(&self) -> Result { + self.snapshot + .beacon_block + .message() + .execution_payload() + .map(|payload| payload.block_number()) + } + /// Returns the active validator count for the current epoch of the head state. /// /// Should only return `None` if the caches have not been built on the head state (this should @@ -274,19 +284,13 @@ impl CanonicalHead { // defensive programming. mut fork_choice_write_lock: RwLockWriteGuard>, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, store: &BeaconStore, spec: &ChainSpec, log: &Logger, ) -> Result<(), Error> { - let fork_choice = >::load_fork_choice( - store.clone(), - reset_payload_statuses, - count_unrealized_full, - spec, - log, - )? - .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice = + >::load_fork_choice(store.clone(), reset_payload_statuses, spec, log)? + .ok_or(Error::MissingPersistedForkChoice)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 6e3538aeda5..9921435313d 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,10 +1,12 @@ -pub use proto_array::{CountUnrealizedFull, ReOrgThreshold}; +pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; use types::{Checkpoint, Epoch}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); +/// Default to 1/12th of the slot, which is 1 second on mainnet. +pub const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12; pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; /// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). @@ -34,6 +36,13 @@ pub struct ChainConfig { pub re_org_threshold: Option, /// Maximum number of epochs since finalization for attempting a proposer re-org. pub re_org_max_epochs_since_finalization: Epoch, + /// Maximum delay after the start of the slot at which to propose a reorging block. + pub re_org_cutoff_millis: Option, + /// Additional epoch offsets at which re-orging block proposals are not permitted. + /// + /// By default this list is empty, but it can be useful for reacting to network conditions, e.g. + /// slow gossip of re-org blocks at slot 1 in the epoch. + pub re_org_disallowed_offsets: DisallowedReOrgOffsets, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. @@ -48,16 +57,11 @@ pub struct ChainConfig { pub builder_fallback_epochs_since_finalization: usize, /// Whether any chain health checks should be considered when deciding whether to use the builder API. 
pub builder_fallback_disable_checks: bool, - /// When set to `true`, weigh the "unrealized" FFG progression when choosing a head in fork - /// choice. - pub count_unrealized: bool, /// When set to `true`, forget any valid/invalid/optimistic statuses in fork choice during start /// up. pub always_reset_payload_statuses: bool, /// Whether to apply paranoid checks to blocks proposed by this beacon node. pub paranoid_block_proposal: bool, - /// Whether to strictly count unrealized justified votes. - pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, /// The offset before the start of a proposal slot at which payload attributes should be sent. @@ -67,10 +71,14 @@ pub struct ChainConfig { pub prepare_payload_lookahead: Duration, /// Use EL-free optimistic sync for the finalized part of the chain. pub optimistic_finalized_sync: bool, + /// The size of the shuffling cache, + pub shuffling_cache_size: usize, /// Whether to send payload attributes every slot, regardless of connected proposers. /// /// This is useful for block builders and testing. pub always_prepare_payload: bool, + /// Whether backfill sync processing should be rate-limited. + pub enable_backfill_rate_limiting: bool, } impl Default for ChainConfig { @@ -83,21 +91,34 @@ impl Default for ChainConfig { max_network_size: 10 * 1_048_576, // 10M re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + re_org_cutoff_millis: None, + re_org_disallowed_offsets: DisallowedReOrgOffsets::default(), fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, builder_fallback_skips_per_epoch: 8, builder_fallback_epochs_since_finalization: 3, builder_fallback_disable_checks: false, - count_unrealized: true, always_reset_payload_statuses: false, paranoid_block_proposal: false, - count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, prepare_payload_lookahead: Duration::from_secs(4), // This value isn't actually read except in tests. optimistic_finalized_sync: true, + shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, always_prepare_payload: false, + enable_backfill_rate_limiting: true, } } } + +impl ChainConfig { + /// The latest delay from the start of the slot at which to attempt a 1-slot re-org. 
+ pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration { + self.re_org_cutoff_millis + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + }) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 45609783426..e789b54a21b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,4 +1,5 @@ use crate::attester_cache::Error as AttesterCacheError; +use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::eth1_chain::Error as Eth1ChainError; @@ -143,6 +144,7 @@ pub enum BeaconChainError { ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box), + EngineGetCapabilititesFailed(Box), BlockHashMissingFromExecutionLayer(ExecutionBlockHash), InconsistentPayloadReconstructed { slot: Slot, @@ -150,6 +152,7 @@ pub enum BeaconChainError { canonical_transactions_root: Hash256, reconstructed_transactions_root: Hash256, }, + BlockStreamerError(BlockStreamerError), AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), PrepareProposerFailed(BlockProcessingError), diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 25971bf85bd..8b6c6b37409 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,7 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; @@ -88,7 +88,7 @@ fn get_sync_status( let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (current_slot / period) * period; - let period_start = slot_start_seconds::( + let period_start = slot_start_seconds( genesis_time, spec.seconds_per_slot, voting_period_start_slot, @@ -470,7 +470,7 @@ impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - let voting_period_start_seconds = slot_start_seconds::( + let voting_period_start_seconds = slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -658,11 +658,7 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { } /// Returns the unix-epoch seconds at the start of the given `slot`. 
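
The `re_org_cutoff` helper above is a one-liner worth spelling out: an explicit millisecond override wins, otherwise the cutoff falls back to one twelfth of the slot. A standalone restatement of that arithmetic as a free function:

```rust
use std::time::Duration;

const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12;

fn re_org_cutoff(re_org_cutoff_millis: Option<u64>, seconds_per_slot: u64) -> Duration {
    re_org_cutoff_millis
        .map(Duration::from_millis)
        .unwrap_or_else(|| {
            Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR
        })
}

fn main() {
    // Mainnet default: 12s / 12 = 1s.
    assert_eq!(re_org_cutoff(None, 12), Duration::from_secs(1));
    // An operator-supplied override in milliseconds takes precedence.
    assert_eq!(re_org_cutoff(Some(250), 12), Duration::from_millis(250));
}
```
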
-fn slot_start_seconds( - genesis_unix_seconds: u64, - seconds_per_slot: u64, - slot: Slot, -) -> u64 { +fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 { genesis_unix_seconds + slot.as_u64() * seconds_per_slot } @@ -698,7 +694,7 @@ mod test { fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { let period = ::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - slot_start_seconds::( + slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -708,23 +704,23 @@ mod test { #[test] fn slot_start_time() { let zero_sec = 0; - assert_eq!(slot_start_seconds::(100, zero_sec, Slot::new(2)), 100); + assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100); let one_sec = 1; - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(1)), 101); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(2)), 102); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102); let three_sec = 3; - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(1)), 103); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(2)), 106); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106); let five_sec = 5; - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(1)), 105); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(2)), 110); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(3)), 115); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115); } fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5cc8ee2d28d..1ac7229cc6d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -159,7 +159,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( latest_valid_hash, ref validation_error, } => { - debug!( + warn!( chain.log, "Invalid execution payload"; "validation_error" => ?validation_error, @@ -206,7 +206,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( PayloadStatus::InvalidBlockHash { ref validation_error, } => { - debug!( + warn!( chain.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 6d5b5ddc4ae..ef23248aba6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,7 +1,6 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; -use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use 
state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -102,7 +101,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It current_slot: Option, spec: &ChainSpec, count_unrealized_config: CountUnrealized, - count_unrealized_full_config: CountUnrealizedFull, ) -> Result, E>, String> { // Fetch finalized block. let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -156,7 +154,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, current_slot, - count_unrealized_full_config, spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 173ce13b4a1..be1522a3b80 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; pub mod beacon_block_reward; +mod beacon_block_streamer; mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; @@ -31,7 +32,7 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; -mod observed_block_producers; +pub mod observed_block_producers; pub mod observed_operations; pub mod otb_verification_service; mod persisted_beacon_chain; @@ -39,7 +40,7 @@ mod persisted_fork_choice; mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; -mod shuffling_cache; +pub mod shuffling_cache; mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; @@ -56,7 +57,7 @@ pub use self::beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; -pub use self::chain_config::{ChainConfig, CountUnrealizedFull}; +pub use self::chain_config::ChainConfig; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 6e53373939a..4121111b3ee 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,11 +1,11 @@ use derivative::Derivative; use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; -use state_processing::{SigVerifiedOp, VerifyOperation}; +use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; @@ -87,12 +87,16 @@ impl ObservableOperation for SignedBlsToExecutionChange { } impl, E: EthSpec> ObservedOperations { - pub fn verify_and_observe( + pub fn verify_and_observe_parametric( &mut self, op: T, + validate: F, head_state: &BeaconState, spec: &ChainSpec, - ) -> Result, T::Error> { + ) -> Result, T::Error> + where + F: Fn(T) -> Result, T::Error>, + { self.reset_at_fork_boundary(head_state.slot(), spec); let observed_validator_indices = &mut self.observed_validator_indices; @@ -112,7 +116,7 @@ impl, E: EthSpec> ObservedOperations { } // Validate the op using operation-specific logic (`verify_attester_slashing`, etc). 
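
The `verify_and_observe_parametric` refactor above threads a caller-supplied closure through the observation logic, so the dedup-and-observe machinery no longer hard-codes how an op is validated. A simplified sketch of the pattern, with `u64` validator indices standing in for real operations and `String` errors in place of `T::Error`:

```rust
use std::collections::HashSet;

/// Observe `op`, validating it with the supplied closure only if it has not
/// been seen before. Returns `Ok(Some(_))` for new ops, `Ok(None)` for known ones.
fn verify_and_observe_parametric<V, F>(
    seen: &mut HashSet<u64>,
    op: u64,
    validate: F,
) -> Result<Option<V>, String>
where
    F: FnOnce(u64) -> Result<V, String>,
{
    if seen.contains(&op) {
        return Ok(None); // already known, skip validation entirely
    }
    let verified = validate(op)?;
    seen.insert(op);
    Ok(Some(verified))
}

fn main() {
    let mut seen = HashSet::new();
    let wall_clock_epoch = 10u64;

    // "Validate at the wall-clock epoch" and "validate against the head state"
    // become different closures over the same core routine.
    let validate_at = |op: u64| -> Result<(u64, u64), String> { Ok((op, wall_clock_epoch)) };

    assert_eq!(
        verify_and_observe_parametric(&mut seen, 7, validate_at),
        Ok(Some((7, 10)))
    );
    // A duplicate op is rejected before validation runs.
    assert_eq!(verify_and_observe_parametric(&mut seen, 7, validate_at), Ok(None));
}
```
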
- let verified_op = op.validate(head_state, spec)?; + let verified_op = validate(op)?; // Add the relevant indices to the set of known indices to prevent processing of duplicates // in the future. @@ -121,6 +125,16 @@ impl, E: EthSpec> ObservedOperations { Ok(ObservationOutcome::New(verified_op)) } + pub fn verify_and_observe( + &mut self, + op: T, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate(head_state, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } + /// Reset the cache when crossing a fork boundary. /// /// This prevents an attacker from crafting a self-slashing which is only valid before the fork @@ -140,3 +154,16 @@ impl, E: EthSpec> ObservedOperations { } } } + +impl + VerifyOperationAt, E: EthSpec> ObservedOperations { + pub fn verify_and_observe_at( + &mut self, + op: T, + verify_at_epoch: Epoch, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate_at(head_state, verify_at_epoch, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } +} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 35202a3c5d3..5808e648a2c 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -3,6 +3,7 @@ mod migration_schema_v12; mod migration_schema_v13; mod migration_schema_v14; mod migration_schema_v15; +mod migration_schema_v16; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -132,6 +133,14 @@ pub fn migrate_schema( let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(15), SchemaVersion(16)) => { + let ops = migration_schema_v16::upgrade_to_v16::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(16), SchemaVersion(15)) => { + let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs new file mode 100644 index 00000000000..230573b0288 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs @@ -0,0 +1,46 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::persisted_fork_choice::PersistedForkChoiceV11; +use slog::{debug, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v16( + db: Arc>, + log: Logger, +) -> Result, Error> { + drop_balances_cache::(db, log) +} + +pub fn downgrade_from_v16( + db: Arc>, + log: Logger, +) -> Result, Error> { + drop_balances_cache::(db, log) +} + +/// Drop the balances cache from the fork choice store. +/// +/// There aren't any type-level changes in this schema migration, however the +/// way that we compute the `JustifiedBalances` has changed due to: +/// https://github.com/sigp/lighthouse/pull/3962 +pub fn drop_balances_cache( + db: Arc>, + log: Logger, +) -> Result, Error> { + let mut persisted_fork_choice = db + .get_item::(&FORK_CHOICE_DB_KEY)? 
+ .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + debug!( + log, + "Dropping fork choice balances cache"; + "item_count" => persisted_fork_choice.fork_choice_store.balances_cache.items.len() + ); + + // Drop all items in the balances cache. + persisted_fork_choice.fork_choice_store.balances_cache = <_>::default(); + + let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY); + + Ok(vec![kv_op]) +} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a01847a0e13..91a1e24d82b 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -9,7 +9,7 @@ use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256 /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this /// ignores a few extra bytes in the caches that should be insignificant compared to the indices). -const CACHE_SIZE: usize = 16; +pub const DEFAULT_CACHE_SIZE: usize = 16; /// The maximum number of concurrent committee cache "promises" that can be issued. In effect, this /// limits the number of concurrent states that can be loaded into memory for the committee cache. @@ -54,9 +54,9 @@ pub struct ShufflingCache { } impl ShufflingCache { - pub fn new() -> Self { + pub fn new(cache_size: usize) -> Self { Self { - cache: LruCache::new(CACHE_SIZE), + cache: LruCache::new(cache_size), } } @@ -172,7 +172,7 @@ impl ToArcCommitteeCache for Arc { impl Default for ShufflingCache { fn default() -> Self { - Self::new() + Self::new(DEFAULT_CACHE_SIZE) } } @@ -249,7 +249,7 @@ mod test { fn resolved_promise() { let (committee_a, _) = committee_caches(); let id_a = shuffling_id(1); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -276,7 +276,7 @@ mod test { #[test] fn unresolved_promise() { let id_a = shuffling_id(1); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -301,7 +301,7 @@ mod test { fn two_promises() { let (committee_a, committee_b) = committee_caches(); let (id_a, id_b) = (shuffling_id(1), shuffling_id(2)); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create promise A. 
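// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the constructor change to
// `ShufflingCache`. `new()` now takes an explicit capacity and `Default`
// keeps the old fixed size of 16, which is why the tests above switch from
// `ShufflingCache::new()` to `ShufflingCache::default()`. Std-only stand-in
// type; the real cache wraps an `LruCache`.
use std::collections::VecDeque;

const DEFAULT_CACHE_SIZE: usize = 16;

struct CacheSketch {
    capacity: usize,
    entries: VecDeque<u64>, // stand-in for committee caches keyed by shuffling id
}

impl CacheSketch {
    fn new(cache_size: usize) -> Self {
        Self { capacity: cache_size, entries: VecDeque::new() }
    }

    fn insert(&mut self, id: u64) {
        if self.entries.len() == self.capacity {
            self.entries.pop_front(); // evict the oldest entry, LRU-style
        }
        self.entries.push_back(id);
    }
}

impl Default for CacheSketch {
    fn default() -> Self {
        Self::new(DEFAULT_CACHE_SIZE)
    }
}

fn main() {
    // An operator serving heavy committee queries might raise the size.
    let mut cache = CacheSketch::new(64);
    cache.insert(1);
    assert_eq!(CacheSketch::default().capacity, DEFAULT_CACHE_SIZE);
}
// ---------------------------------------------------------------------------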
let sender_a = cache.create_promise(id_a.clone()).unwrap(); @@ -355,7 +355,7 @@ mod test { #[test] fn too_many_promises() { - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); for i in 0..MAX_CONCURRENT_PROMISES { cache.create_promise(shuffling_id(i as u64)).unwrap(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index afb31bba77a..3c5d1fd3b1a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -457,7 +457,7 @@ where builder_threshold: Option, ) -> Self { // Get a random unused port - let port = unused_port::unused_tcp_port().unwrap(); + let port = unused_port::unused_tcp4_port().unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let spec = self.spec.clone().expect("cannot build without spec"); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index de26810126a..396aac71b07 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -15,6 +15,7 @@ use std::io; use std::marker::PhantomData; use std::str::Utf8Error; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use store::AbstractExecPayload; use types::{ AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, @@ -198,6 +199,7 @@ pub struct ValidatorMetrics { pub attestation_head_misses: u64, pub attestation_target_hits: u64, pub attestation_target_misses: u64, + pub latest_attestation_inclusion_distance: u64, } impl ValidatorMetrics { @@ -224,6 +226,10 @@ impl ValidatorMetrics { pub fn increment_head_misses(&mut self) { self.attestation_head_misses += 1; } + + pub fn set_latest_inclusion_distance(&mut self, distance: u64) { + self.latest_attestation_inclusion_distance = distance; + } } /// A validator that is being monitored by the `ValidatorMonitor`. @@ -567,7 +573,6 @@ impl ValidatorMonitor { } else { validator_metrics.increment_misses() } - drop(validator_metrics); // Indicates if any attestation made it on-chain. // @@ -692,8 +697,10 @@ impl ValidatorMonitor { &[id], inclusion_delay as i64, ); + validator_metrics.set_latest_inclusion_distance(inclusion_delay); } } + drop(validator_metrics); // Indicates the number of sync committee signatures that made it into // a sync aggregate in the current_epoch (state.epoch - 1). @@ -1736,9 +1743,9 @@ fn u64_to_i64(n: impl Into) -> i64 { } /// Returns the delay between the start of `block.slot` and `seen_timestamp`. -pub fn get_block_delay_ms( +pub fn get_block_delay_ms>( seen_timestamp: Duration, - block: BeaconBlockRef<'_, T>, + block: BeaconBlockRef<'_, T, P>, slot_clock: &S, ) -> Duration { get_slot_delay_ms::(seen_timestamp, block.slot(), slot_clock) diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 384fcbe5db6..b4eabc8093f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -500,7 +500,7 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { // Move forward a slot so all queued attestations can be processed. 
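// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: why `drop(validator_metrics)`
// moved below the inclusion-delay block in `validator_monitor.rs` above. The
// guard must stay live so the new `latest_attestation_inclusion_distance`
// field can be written in the same critical section as the hit/miss
// counters, instead of re-locking. Stand-in types:
use std::sync::Mutex;

#[derive(Default)]
struct Metrics {
    attestation_hits: u64,
    latest_attestation_inclusion_distance: u64,
}

fn main() {
    let metrics = Mutex::new(Metrics::default());

    let mut guard = metrics.lock().unwrap();
    guard.attestation_hits += 1;
    // Previously the guard was dropped here; now it lives long enough to
    // record the inclusion distance as well.
    guard.latest_attestation_inclusion_distance = 2;
    drop(guard);
}
// ---------------------------------------------------------------------------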
harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) + .update_time(harness.chain.slot().unwrap()) .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) @@ -614,7 +614,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) + .update_time(harness.chain.slot().unwrap()) .unwrap(); let validators: Vec = (0..VALIDATOR_COUNT).collect(); diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 48ac0300c98..b79fc5e4073 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -10,3 +10,4 @@ sensitive_url = { path = "../../common/sensitive_url" } eth2 = { path = "../../common/eth2" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +lighthouse_version = { path = "../../common/lighthouse_version" } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index fecf6512ac8..255c2fdd19b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -17,6 +17,9 @@ pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; /// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; +/// Default user agent for HTTP requests. +pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION; + #[derive(Clone)] pub struct Timeouts { get_header: Duration, @@ -41,23 +44,23 @@ pub struct BuilderHttpClient { client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, + user_agent: String, } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl) -> Result { + pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); + let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { - client: reqwest::Client::new(), + client, server, timeouts: Timeouts::default(), + user_agent, }) } - pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { - Ok(Self { - client: reqwest::Client::new(), - server, - timeouts, - }) + pub fn get_user_agent(&self) -> &str { + &self.user_agent } async fn get_with_timeout( diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index fde82cd7fba..64fa5b79d60 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -347,12 +347,6 @@ where while block.slot() % slots_per_epoch != 0 { block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot, - ); - debug!( context.log(), "Searching for aligned checkpoint block"; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index b0184dc0ffc..584a0d736de 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -46,9 +46,18 @@ impl Client { self.http_metrics_listen_addr } - /// Returns the port of the client's libp2p stack, if it was started. - pub fn libp2p_listen_port(&self) -> Option { - self.network_globals.as_ref().map(|n| n.listen_port_tcp()) + /// Returns the ipv4 port of the client's libp2p stack, if it was started. 
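// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the user-agent plumbing added to
// `BuilderHttpClient::new` above. An operator-supplied agent overrides the
// default (the Lighthouse version string). `reqwest`'s builder API is real;
// the default string below is a hypothetical stand-in for
// `lighthouse_version::VERSION`.
fn build_http_client(user_agent: Option<String>) -> Result<reqwest::Client, reqwest::Error> {
    const DEFAULT_USER_AGENT: &str = "Lighthouse/v0.0.0"; // stand-in value
    let user_agent = user_agent.unwrap_or_else(|| DEFAULT_USER_AGENT.to_string());
    reqwest::Client::builder().user_agent(user_agent).build()
}

fn main() -> Result<(), reqwest::Error> {
    // An anonymised agent, as a privacy-conscious operator might configure.
    let _client = build_http_client(Some("anon".to_string()))?;
    Ok(())
}
// ---------------------------------------------------------------------------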
+ pub fn libp2p_listen_ipv4_port(&self) -> Option { + self.network_globals + .as_ref() + .and_then(|n| n.listen_port_tcp4()) + } + + /// Returns the ipv6 port of the client's libp2p stack, if it was started. + pub fn libp2p_listen_ipv6_port(&self) -> Option { + self.network_globals + .as_ref() + .and_then(|n| n.listen_port_tcp6()) } /// Returns the list of libp2p addresses the client is listening to. diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index fb8a9b6349b..1105bc41f67 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -460,7 +460,11 @@ async fn capella_readiness_logging( match beacon_chain.check_capella_readiness().await { CapellaReadiness::Ready => { - info!(log, "Ready for Capella") + info!( + log, + "Ready for Capella"; + "info" => "ensure the execution endpoint is updated to the latest Capella/Shanghai release" + ) } readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { error!( diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index e0dd797bfaf..1148f063d8d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -20,9 +20,9 @@ serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" parking_lot = "0.12.0" slog = "2.5.2" superstruct = "0.5.0" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 1b687a8b60e..3ed7ba65d6a 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -13,7 +13,7 @@ slog = "2.5.2" futures = "0.3.7" sensitive_url = { path = "../../common/sensitive_url" } reqwest = { version = "0.11.0", features = ["json","stream"] } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } warp = { version = "0.3.2", features = ["tls"] } @@ -22,15 +22,15 @@ environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" -tree_hash = "0.4.1" -tree_hash_derive = { path = "../../consensus/tree_hash_derive"} +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" parking_lot = "0.12.0" slot_clock = { path = "../../common/slot_clock" } tempfile = "3.1.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 38311b82302..4d2eb565e1c 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,7 +1,8 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, - ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, }; use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, 
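// ---------------------------------------------------------------------------
// Illustrative note, not part of the diff: the dependency bumps above rename
// *packages* (eth2_ssz -> ethereum_ssz, eth2_hashing -> ethereum_hashing,
// eth2_serde_utils -> ethereum_serde_utils) while the `ssz` and `tree_hash`
// crate paths in code stay the same. Only the serde-utils path changes, which
// is what the attribute rewrites in the hunks below are doing. A minimal
// sketch, assuming the renamed crate still exposes `u64_hex_be`:
use serde::Serialize;

#[derive(Serialize)]
struct ExecutionBlockSketch {
    // was: #[serde(with = "eth2_serde_utils::u64_hex_be")]
    #[serde(with = "serde_utils::u64_hex_be")]
    timestamp: u64,
}

fn main() {
    let json = serde_json::to_string(&ExecutionBlockSketch { timestamp: 32 }).unwrap();
    assert_eq!(json, r#"{"timestamp":"0x20"}"#);
}
// ---------------------------------------------------------------------------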
SsePayloadAttributesV2}; @@ -16,7 +17,8 @@ use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - ExecutionPayloadRef, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, + ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, + Withdrawal, Withdrawals, }; use types::{ExecutionPayloadCapella, ExecutionPayloadMerge}; @@ -125,11 +127,11 @@ pub enum BlockByNumberQuery<'a> { pub struct ExecutionBlock { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, } @@ -155,13 +157,13 @@ pub struct ExecutionBlockWithTransactions { pub logs_bloom: FixedVector, #[serde(alias = "mixHash")] pub prev_randao: Hash256, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, @@ -371,12 +373,80 @@ impl GetPayloadResponse { } } +#[derive(Clone, Debug)] +pub struct ExecutionPayloadBodyV1 { + pub transactions: Transactions, + pub withdrawals: Option>, +} + +impl ExecutionPayloadBodyV1 { + pub fn to_payload( + self, + header: ExecutionPayloadHeader, + ) -> Result, String> { + match header { + ExecutionPayloadHeader::Merge(header) => { + if self.withdrawals.is_some() { + return Err(format!( + "block {} is merge but payload body has withdrawals", + header.block_hash + )); + } + Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + })) + } + ExecutionPayloadHeader::Capella(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + })) + } else { + Err(format!( + "block {} is capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } + } + } +} + 
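// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the fork invariant that
// `ExecutionPayloadBodyV1::to_payload` enforces above. Merge-era bodies must
// not carry withdrawals, and Capella bodies must. Simplified stand-in types:
enum Fork { Merge, Capella }

fn check_body(fork: Fork, has_withdrawals: bool) -> Result<(), &'static str> {
    match (fork, has_withdrawals) {
        (Fork::Merge, false) | (Fork::Capella, true) => Ok(()),
        (Fork::Merge, true) => Err("merge block but payload body has withdrawals"),
        (Fork::Capella, false) => Err("capella block but payload body doesn't have withdrawals"),
    }
}

fn main() {
    assert!(check_body(Fork::Capella, true).is_ok());
    assert!(check_body(Fork::Merge, true).is_err());
}
// ---------------------------------------------------------------------------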
#[derive(Clone, Copy, Debug)] pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, + pub get_payload_bodies_by_hash_v1: bool, + pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, pub exchange_transition_configuration_v1: bool, @@ -397,6 +467,12 @@ impl EngineCapabilities { if self.forkchoice_updated_v2 { response.push(ENGINE_FORKCHOICE_UPDATED_V2); } + if self.get_payload_bodies_by_hash_v1 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); + } + if self.get_payload_bodies_by_range_v1 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); + } if self.get_payload_v1 { response.push(ENGINE_GET_PAYLOAD_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8492dbc4cee..029866d95b5 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -42,6 +42,10 @@ pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); +pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; +pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; +pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); + pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); @@ -62,6 +66,8 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_PAYLOAD_V2, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ]; @@ -73,6 +79,8 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilit new_payload_v2: false, forkchoice_updated_v1: true, forkchoice_updated_v2: false, + get_payload_bodies_by_hash_v1: false, + get_payload_bodies_by_range_v1: false, get_payload_v1: true, get_payload_v2: false, exchange_transition_configuration_v1: true, @@ -882,6 +890,50 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([block_hashes]); + + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(From::from)) + .collect()) + } + + pub async fn get_payload_bodies_by_range_v1( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + #[derive(Serialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); + + let params = json!([Quantity(start), Quantity(count)]); + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(From::from)) + .collect()) + } + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: TransitionConfigurationV1, @@ -924,6 
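// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: capability negotiation for the
// two new engine methods. The EL returns the method names it supports via
// engine_exchangeCapabilities, and the boolean flags are derived by string
// containment, as in the `exchange_capabilities` handling further down. The
// constants mirror those introduced in http.rs above.
const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1";
const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1";

struct CapsSketch {
    get_payload_bodies_by_hash_v1: bool,
    get_payload_bodies_by_range_v1: bool,
}

fn from_response(supported: &[&str]) -> CapsSketch {
    CapsSketch {
        get_payload_bodies_by_hash_v1: supported.contains(&ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1),
        get_payload_bodies_by_range_v1: supported.contains(&ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1),
    }
}

fn main() {
    // A pre-Capella EL that never advertises the new methods.
    let caps = from_response(&["engine_newPayloadV1", "engine_forkchoiceUpdatedV1"]);
    assert!(!caps.get_payload_bodies_by_hash_v1);
    assert!(!caps.get_payload_bodies_by_range_v1);
}
// ---------------------------------------------------------------------------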
+976,10 @@ impl HttpJsonRpc { new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + get_payload_bodies_by_hash_v1: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), + get_payload_bodies_by_range_v1: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), exchange_transition_configuration_v1: capabilities @@ -1131,7 +1187,7 @@ mod test { transactions, ..<_>::default() }); - let json = serde_json::to_value(&ep)?; + let json = serde_json::to_value(ep)?; Ok(json.get("transactions").unwrap().clone()) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index dcfa6354539..d85d294c836 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use strum::EnumString; use superstruct::superstruct; use types::{ - EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList, Withdrawal, + EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, }; use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; @@ -35,7 +35,7 @@ pub struct JsonResponseBody { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); +pub struct TransparentJsonPayloadId(#[serde(with = "serde_utils::bytes_8_hex")] pub PayloadId); impl From for TransparentJsonPayloadId { fn from(id: PayloadId) -> Self { @@ -56,7 +56,7 @@ pub type JsonPayloadIdRequest = TransparentJsonPayloadId; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadIdResponse { - #[serde(with = "eth2_serde_utils::bytes_8_hex")] + #[serde(with = "serde_utils::bytes_8_hex")] pub payload_id: PayloadId, } @@ -79,22 +79,21 @@ pub struct JsonExecutionPayload { #[serde(with = "serde_logs_bloom")] pub logs_bloom: FixedVector, pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] - pub transactions: - VariableList, T::MaxTransactionsPerPayload>, + pub transactions: Transactions, #[superstruct(only(V2))] pub withdrawals: VariableList, } @@ -227,7 +226,7 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV1, #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, - 
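// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the engine API encodes u64
// quantities as 0x-prefixed big-endian hex with no leading zeroes, which is
// what the transparent `Quantity` wrapper above achieves via
// `serde_utils::u64_hex_be`. A hand-rolled equivalent for intuition only:
fn quantity_hex(n: u64) -> String {
    format!("{:#x}", n)
}

fn main() {
    // engine_getPayloadBodiesByRangeV1 params for start=32, count=4:
    let params = vec![quantity_hex(32), quantity_hex(4)];
    assert_eq!(params, ["0x20", "0x4"]);
}
// ---------------------------------------------------------------------------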
#[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, } @@ -253,12 +252,12 @@ impl From> for GetPayloadResponse { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub index: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub amount: u64, } @@ -296,7 +295,7 @@ impl From for Withdrawal { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub struct JsonPayloadAttributes { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, @@ -494,21 +493,45 @@ impl From for JsonForkchoiceUpdatedV1Response { } } +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct JsonExecutionPayloadBodyV1 { + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + pub transactions: Transactions, + pub withdrawals: Option>, +} + +impl From> for ExecutionPayloadBodyV1 { + fn from(value: JsonExecutionPayloadBodyV1) -> Self { + Self { + transactions: value.transactions, + withdrawals: value.withdrawals.map(|json_withdrawals| { + Withdrawals::::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + } + } +} + #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub terminal_block_number: u64, } /// Serializes the `logs_bloom` field of an `ExecutionPayload`. pub mod serde_logs_bloom { use super::*; - use eth2_serde_utils::hex::PrefixedHexVisitor; use serde::{Deserializer, Serializer}; + use serde_utils::hex::PrefixedHexVisitor; pub fn serialize(bytes: &FixedVector, serializer: S) -> Result where diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d12f9996d87..09be379d240 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -47,7 +47,7 @@ use types::{ mod block_hash; mod engine_api; -mod engines; +pub mod engines; mod keccak; mod metrics; pub mod payload_cache; @@ -77,7 +77,7 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); /// A payload alongside some information about where it came from. -enum ProvenancedPayload
<P> { +pub enum ProvenancedPayload<P>
{ /// A good ol' fashioned farm-to-table payload from your local EE. Local(P), /// A payload from a builder (e.g. mev-boost). @@ -103,6 +103,8 @@ pub enum Error { transactions_root: Hash256, }, InvalidJWTSecret(String), + InvalidForkForPayload, + InvalidPayloadBody(String), BeaconStateError(BeaconStateError), } @@ -228,6 +230,8 @@ pub struct Config { pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// User agent to send with requests to the builder API. + pub builder_user_agent: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -258,6 +262,7 @@ impl ExecutionLayer { let Config { execution_endpoints: urls, builder_url, + builder_user_agent, secret_files, suggested_fee_recipient, jwt_id, @@ -293,6 +298,7 @@ impl ExecutionLayer { .map_err(Error::InvalidJWTSecret) } else { // Create a new file and write a randomly generated secret to it if file does not exist + warn!(log, "No JWT found on disk. Generating"; "path" => %secret_file.display()); std::fs::File::options() .write(true) .create_new(true) @@ -317,12 +323,17 @@ impl ExecutionLayer { let builder = builder_url .map(|url| { - let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); - info!(log, + let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) + .map_err(Error::Builder)?; + + info!( + log, "Connected to external block builder"; "builder_url" => ?url, - "builder_profit_threshold" => builder_profit_threshold); - builder_client + "builder_profit_threshold" => builder_profit_threshold, + "local_user_agent" => builder_client.get_user_agent(), + ); + Ok::<_, Error>(builder_client) }) .transpose()?; @@ -1570,14 +1581,90 @@ impl ExecutionLayer { } } - pub async fn get_payload_by_block_hash( + pub async fn get_payload_bodies_by_hash( + &self, + hashes: Vec, + ) -> Result>>, Error> { + self.engine() + .request(|engine: &Engine| async move { + engine.api.get_payload_bodies_by_hash_v1(hashes).await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + + pub async fn get_payload_bodies_by_range( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); + self.engine() + .request(|engine: &Engine| async move { + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + + /// Fetch a full payload from the execution node. + /// + /// This will fail if the payload is not from the finalized portion of the chain. + pub async fn get_payload_for_header( + &self, + header: &ExecutionPayloadHeader, + fork: ForkName, + ) -> Result>, Error> { + let hash = header.block_hash(); + let block_number = header.block_number(); + + // Handle default payload body. + if header.block_hash() == ExecutionBlockHash::zero() { + let payload = match fork { + ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Base | ForkName::Altair => { + return Err(Error::InvalidForkForPayload); + } + }; + return Ok(Some(payload)); + } + + // Use efficient payload bodies by range method if supported. 
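// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the dispatch implemented in
// `get_payload_for_header` around this point. Stand-in types; the real code
// returns `Option<ExecutionPayload<E>>`.
fn reconstruction_route(block_hash_is_zero: bool, supports_bodies_by_range: bool) -> &'static str {
    if block_hash_is_zero {
        // Pre-merge blocks carry the default (empty) payload.
        "default payload for the block's fork"
    } else if supports_bodies_by_range {
        "engine_getPayloadBodiesByRangeV1 body + stored header"
    } else {
        "legacy eth_getBlockByHash reconstruction"
    }
}

fn main() {
    assert_eq!(reconstruction_route(true, true), "default payload for the block's fork");
    assert_eq!(reconstruction_route(false, false), "legacy eth_getBlockByHash reconstruction");
}
// ---------------------------------------------------------------------------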
+ let capabilities = self.get_engine_capabilities(None).await?; + if capabilities.get_payload_bodies_by_range_v1 { + let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; + + if payload_bodies.len() != 1 { + return Ok(None); + } + + let opt_payload_body = payload_bodies.pop().flatten(); + opt_payload_body + .map(|body| { + body.to_payload(header.clone()) + .map_err(Error::InvalidPayloadBody) + }) + .transpose() + } else { + // Fall back to eth_blockByHash. + self.get_payload_by_hash_legacy(hash, fork).await + } + } + + pub async fn get_payload_by_hash_legacy( &self, hash: ExecutionBlockHash, fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash, fork) + self.get_payload_by_hash_from_engine(engine, hash, fork) .await }) .await @@ -1585,7 +1672,7 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, @@ -1598,7 +1685,7 @@ impl ExecutionLayer { ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( - format!("called get_payload_by_block_hash_from_engine with {}", fork), + format!("called get_payload_by_hash_from_engine with {}", fork), )), }; } diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 287050f66be..3ed99ca6068 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -45,6 +45,10 @@ lazy_static::lazy_static! { "execution_layer_get_payload_by_block_hash_time", "Time to reconstruct a payload from the EE using eth_getBlockByHash" ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: Result = try_create_histogram( + "execution_layer_get_payload_bodies_by_range_time", + "Time to fetch a range of payload bodies from the EE" + ); pub static ref EXECUTION_LAYER_VERIFY_BLOCK_HASH: Result = try_create_histogram_with_buckets( "execution_layer_verify_block_hash_time", "Time to verify the execution block hash in Lighthouse, without the EL", diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index c016a16a21a..a8d98a767fb 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -199,6 +199,14 @@ impl ExecutionBlockGenerator { .and_then(|block| block.as_execution_block_with_tx()) } + pub fn execution_block_with_txs_by_number( + &self, + number: u64, + ) -> Option> { + self.block_by_number(number) + .and_then(|block| block.as_execution_block_with_tx()) + } + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { let target_block = self .terminal_block_number diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 2a54dfae611..e3c58cfc270 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -2,7 +2,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; -use 
serde::de::DeserializeOwned; +use serde::{de::DeserializeOwned, Deserialize}; use serde_json::Value as JsonValue; use std::sync::Arc; use types::{EthSpec, ForkName}; @@ -359,6 +359,61 @@ pub async fn handle_rpc( let engine_capabilities = ctx.engine_capabilities.read(); Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) } + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { + #[derive(Deserialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); + + let start = get_param::(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + .0; + let count = get_param::(params, 1) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + .0; + + let mut response = vec![]; + for block_num in start..(start + count) { + let maybe_block = ctx + .execution_block_generator + .read() + .execution_block_with_txs_by_number(block_num); + + match maybe_block { + Some(block) => { + let transactions = Transactions::::new( + block + .transactions() + .iter() + .map(|transaction| VariableList::new(transaction.rlp().to_vec())) + .collect::>() + .map_err(|e| { + ( + format!("failed to deserialize transaction: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?, + ) + .map_err(|e| { + ( + format!("failed to deserialize transactions: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?; + + response.push(Some(JsonExecutionPayloadBodyV1:: { + transactions, + withdrawals: block + .withdrawals() + .ok() + .map(|withdrawals| VariableList::from(withdrawals.clone())), + })); + } + None => response.push(None), + } + } + + Ok(serde_json::to_value(response).unwrap()) + } other => Err(( format!("The method {} does not exist/is not available", other), METHOD_NOT_FOUND_CODE, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 36b24bfc39b..9379a3c2389 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -39,6 +39,8 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v2: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, + get_payload_bodies_by_hash_v1: true, + get_payload_bodies_by_range_v1: true, get_payload_v1: true, get_payload_v2: true, exchange_transition_configuration_v1: true, diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 87c56d360b2..8a7d224963e 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -16,9 +16,9 @@ eth1 = { path = "../eth1"} rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } -eth2_ssz = "0.4.1" -eth2_hashing = "0.3.0" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +tree_hash = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 122ca8eda6b..d0129834300 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,5 +1,5 @@ use crate::common::genesis_deposits; -use eth2_hashing::hash; +use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index e2c44afd360..2b117b26cef 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -24,7 +24,7 
@@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" bs58 = "0.4.0" futures = "0.3.8" execution_layer = {path = "../execution_layer"} @@ -32,24 +32,23 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" -tree_hash = "0.4.1" +tree_hash = "0.5.0" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } logging = { path = "../../common/logging" } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" operation_pool = { path = "../operation_pool" } +sensitive_url = { path = "../../common/sensitive_url" } +unused_port = {path = "../../common/unused_port"} +store = { path = "../store" } [dev-dependencies] -store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -sensitive_url = { path = "../../common/sensitive_url" } -logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } -unused_port = {path = "../../common/unused_port"} genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" -path = "tests/main.rs" +path = "tests/main.rs" \ No newline at end of file diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index ca68d4d04cc..3e7d8d5e316 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -77,8 +77,8 @@ pub fn get_attestation_performance( // query is within permitted bounds to prevent potential OOM errors. if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { return Err(custom_bad_request(format!( - "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", - query.start_epoch, query.end_epoch + "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}", + MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch ))); } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9febae5b197..5c3e420839d 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -114,8 +114,10 @@ fn compute_historic_attester_duties( )?; (state, execution_optimistic) } else { - StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)?; + (state, execution_optimistic) }; // Sanity-check the state lookup. diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 5c785fe6517..f1a42b87442 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -4,13 +4,15 @@ use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. 
#[derive(Debug)] pub struct BlockId(pub CoreBlockId); +type Finalized = bool; + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -24,7 +26,7 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -34,22 +36,23 @@ impl BlockId { Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), + false, )) } - CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false, true)), CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; - Ok((finalized_checkpoint.root, execution_optimistic)) + Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; - Ok((justified_checkpoint.root, execution_optimistic)) + Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { let execution_optimistic = chain @@ -66,7 +69,14 @@ impl BlockId { )) }) })?; - Ok((root, execution_optimistic)) + let finalized = *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + Ok((root, execution_optimistic, finalized)) } CoreBlockId::Root(root) => { // This matches the behaviour of other consensus clients (e.g. Teku). @@ -88,7 +98,20 @@ impl BlockId { .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - Ok((*root, execution_optimistic)) + let blinded_block = chain + .get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error)? 
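// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the `Finalized` flag threaded
// through `BlockId::root` and friends. The return tuples widen from
// (value, execution_optimistic) to (value, execution_optimistic, finalized),
// so the HTTP layer can populate the `finalized` field of
// `ExecutionOptimisticFinalizedResponse`. A stand-in for the slot rule used
// in the `CoreBlockId::Slot` arm:
type Finalized = bool;

fn is_finalized(block_slot: u64, finalized_epoch_start_slot: u64) -> Finalized {
    // A block at or before the finalized checkpoint's start slot is final.
    block_slot <= finalized_epoch_start_slot
}

fn main() {
    assert!(is_finalized(31, 32));
    assert!(!is_finalized(33, 32));
}
// ---------------------------------------------------------------------------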
+ .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + })?; + let block_slot = blinded_block.slot(); + let finalized = chain + .is_finalized_block(root, block_slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -103,7 +126,14 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + SignedBlindedBeaconBlock, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -113,10 +143,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -128,7 +159,7 @@ impl BlockId { slot ))); } - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -137,7 +168,7 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -149,7 +180,7 @@ impl BlockId { )) }) })?; - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } } } @@ -158,7 +189,14 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + Arc>, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -168,10 +206,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await @@ -184,7 +223,7 @@ impl BlockId { slot ))); } - Ok((Arc::new(block), execution_optimistic)) + Ok((Arc::new(block), execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -193,14 +232,14 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { block_opt - .map(|block| (Arc::new(block), execution_optimistic)) + .map(|block| (Arc::new(block), execution_optimistic, finalized)) .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6ee6ffd81da..fa75668e9f0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -18,6 +18,7 @@ mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; +pub mod test_utils; mod ui; mod validator_inclusion; mod version; @@ -30,7 
+31,8 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, + self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, + ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -38,6 +40,7 @@ use logging::SSELoggingComponents; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; +use publish_blocks::ProvenancedBlock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -54,15 +57,15 @@ use system_health::observe_system_health_bn; use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, - CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, + BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, execution_optimistic_fork_versioned_response, + add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; @@ -525,12 +528,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = state_id.root(&chain)?; - + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -541,11 +545,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (fork, execution_optimistic) = - state_id.fork_and_execution_optimistic(&chain)?; - Ok(api_types::ExecutionOptimisticResponse { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(api_types::ExecutionOptimisticFinalizedResponse { data: fork, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -557,23 +562,26 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( - &chain, - |state, execution_optimistic| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - )) - }, - )?; + let (data, execution_optimistic, finalized) = 
state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -590,10 +598,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { Ok(( state .validators() @@ -621,13 +629,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -645,10 +655,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = state.current_epoch(); let far_future_epoch = chain.spec.far_future_epoch; @@ -698,13 +708,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -723,10 +735,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { state.validators().iter().position(|v| v.pubkey == *pubkey) @@ -760,13 +772,15 @@ pub fn serve( )) })?, execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -781,46 +795,119 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - Ok(relative_epoch) - if state - 
.committee_cache_is_initialized(relative_epoch) => - { - state.committee_cache(relative_epoch).map(Cow::Borrowed) - } - _ => CommitteeCache::initialized(state, epoch, &chain.spec) + // Attempt to obtain the committee_cache from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = if let Some(ref shuffling) = + maybe_cached_shuffling + { + Cow::Borrowed(&**shuffling) + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state + .committee_cache(relative_epoch) + .map(Cow::Borrowed) + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ) .map(Cow::Owned), - } - .map_err(|e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() as u64; - let first_subsequent_restore_point_slot = ((epoch - .start_slot(T::EthSpec::slots_per_epoch()) - / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request(format!( - "epoch out of bounds, try state at slot {}", - first_subsequent_restore_point_slot, - )) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, too far in future".into(), - ) + } + .map_err(|e| { + match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => { + warp_utils::reject::beacon_chain_error(e.into()) + } + } + })?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default value). + if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + { + if let Some(shuffling_id) = shuffling_id { + if let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &*possibly_built_cache, + ); } } - _ => warp_utils::reject::beacon_chain_error(e.into()), - })?; + } + possibly_built_cache + }; // Use either the supplied slot or all slots in the epoch. 
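// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: the shuffling decision slot used
// above to key the cache. The attestation shuffling for `epoch` is fixed by
// the last slot of `epoch - 2`, so that slot's block root identifies the
// shuffling. Assuming mainnet's 32 slots per epoch:
fn decision_slot(epoch: u64) -> u64 {
    let slots_per_epoch = 32;
    let decision_epoch = epoch.saturating_sub(2);
    // end_slot(epoch) == (epoch + 1) * slots_per_epoch - 1
    (decision_epoch + 1) * slots_per_epoch - 1
}

fn main() {
    assert_eq!(decision_slot(10), 8 * 32 + 31); // last slot of epoch 8
}
// ---------------------------------------------------------------------------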
let slots = @@ -867,12 +954,13 @@ pub fn serve( } } - Ok((response, execution_optimistic)) + Ok((response, execution_optimistic, finalized)) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -889,10 +977,10 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let (sync_committee, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); Ok(( @@ -902,9 +990,10 @@ pub fn serve( .map_err(|e| match e { BeaconStateError::SyncCommitteeNotKnown { .. } => { warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no sync committee for epoch {}", - current_epoch, epoch - )) + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) } BeaconStateError::IncorrectStateVariant => { warp_utils::reject::custom_bad_request(format!( @@ -915,6 +1004,7 @@ pub fn serve( e => warp_utils::reject::beacon_state_error(e), })?, execution_optimistic, + finalized, )) }, )?; @@ -936,7 +1026,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -950,23 +1040,23 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { blocking_json_task(move || { - let (randao, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); let randao = *state.get_randao_mix(epoch).map_err(|e| { warp_utils::reject::custom_bad_request(format!( "epoch out of range: {e:?}" )) })?; - Ok((randao, execution_optimistic)) + Ok((randao, execution_optimistic, finalized)) }, )?; Ok( api_types::GenericResponse::from(api_types::RandaoMix { randao }) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, @@ -988,72 +1078,73 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block, execution_optimistic) = match (query.slot, query.parent_root) - { - // No query parameters, return the canonical head block. - (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? - // Ignore any skip-slots immediately following the parent. 
- .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) + } + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. - .map(|(block, _execution_optimistic)| { - (root, block, execution_optimistic) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. - if let Some(parent_root) = parent_root_opt { - if block.parent_root() != parent_root { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. 
+ if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } - (root, block, execution_optimistic) - } - }; + (root, block, execution_optimistic, finalized) + } + }; let data = api_types::BlockHeaderData { root, @@ -1065,7 +1156,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1083,10 +1174,10 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = block_id.root(&chain)?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; // Ignore the second `execution_optimistic` since the first one has more // information about the original request. - let (block, _execution_optimistic) = + let (block, _execution_optimistic, _finalized) = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain @@ -1103,8 +1194,9 @@ pub fn serve( }, }; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) }) @@ -1128,9 +1220,15 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_block(None, block, chain, &network_tx, log) - .await - .map(|()| warp::reply().into_response()) + publish_blocks::publish_block( + None, + ProvenancedBlock::Local(block), + chain, + &network_tx, + log, + ) + .await + .map(|()| warp::reply().into_response()) }, ); @@ -1187,7 +1285,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let (block, execution_optimistic) = block_id.full_block(&chain).await?; + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1203,10 +1302,11 @@ pub fn serve( e )) }), - _ => execution_optimistic_fork_versioned_response( + _ => execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()), @@ -1223,12 +1323,11 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok(api_types::GenericResponse::from(api_types::RootData::from( block.canonical_root(), )) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }); @@ -1239,11 +1338,10 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok( api_types::GenericResponse::from(block.message().body().attestations().clone()) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }); @@ -1261,7 +1359,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { blocking_response_task(move || { - let (block, 
execution_optimistic) = block_id.blinded_block(&chain)?; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1279,10 +1378,11 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. - execution_optimistic_fork_versioned_response( + execution_optimistic_finalized_fork_versioned_response( V2, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()) @@ -1823,11 +1923,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|chain: Arc>, block_id: BlockId| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -1906,14 +2008,16 @@ pub fn serve( validators: Vec, log: Logger| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = sync_committee_rewards::compute_sync_committee_rewards( chain, block_id, validators, log, )?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -1996,7 +2100,7 @@ pub fn serve( // We can ignore the optimistic status for the "fork" since it's a // specification constant that doesn't change across competing heads of the // beacon chain. - let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -2014,16 +2118,17 @@ pub fn serve( )) }) } - _ => state_id.map_state_and_execution_optimistic( + _ => state_id.map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_fork_versioned_response( + let res = execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, &state, )?; Ok(add_consensus_version_header( @@ -2073,6 +2178,58 @@ pub fn serve( }, ); + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .and(warp::path("debug")) + .and(warp::path("fork_choice")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node + .justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + finalized_epoch: node + .finalized_checkpoint + .map(|checkpoint| 
checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: proto_array.justified_checkpoint, + finalized_checkpoint: proto_array.finalized_checkpoint, + fork_choice_nodes, + }) + }) + }); + /* * node */ @@ -3355,7 +3512,7 @@ pub fn serve( .and_then(|state_id: StateId, chain: Arc>| { blocking_response_task(move || { // This debug endpoint provides no indication of optimistic status. - let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -3644,6 +3801,7 @@ pub fn serve( .uor(get_config_deposit_contract) .uor(get_debug_beacon_states) .uor(get_debug_beacon_heads) + .uor(get_debug_fork_choice) .uor(get_node_identity) .uor(get_node_version) .uor(get_node_syncing) diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 1c3ab1f6804..26ee183c83f 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -29,9 +29,10 @@ lazy_static::lazy_static! { "http_api_beacon_proposer_cache_misses_total", "Count of times the proposer cache has been missed", ); - pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram( + pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram_vec( "http_api_block_broadcast_delay_times", - "Time between start of the slot and when the block was broadcast" + "Time between start of the slot and when the block was broadcast", + &["provenance"] ); pub static ref HTTP_API_BLOCK_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( "http_api_block_published_late_total", diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 877d64e20f8..7e946b89e72 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -209,7 +209,9 @@ fn compute_historic_proposer_duties( .map_err(warp_utils::reject::beacon_chain_error)?; (state, execution_optimistic) } else { - StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; + (state, execution_optimistic) }; // Ensure the state lookup was correct. 
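The hunks above make one recurring change: every state and block response now reports a `finalized` flag next to the existing `execution_optimistic` flag. A minimal sketch of the resulting response shape and the slot-based finality test, assuming the serde and serde_json crates; `ExecutionOptimisticFinalizedResponse` matches the type name used in the patch, while `is_finalized_slot` is a hypothetical helper condensing the `CoreStateId::Slot` arm of `StateId::root` further down:

use serde::Serialize;

// Sketch of the wrapper the patch threads through the HTTP API: both
// flags are optional so endpoints that cannot determine them can skip
// them.
#[derive(Serialize)]
struct ExecutionOptimisticFinalizedResponse<T: Serialize> {
    execution_optimistic: Option<bool>,
    finalized: Option<bool>,
    data: T,
}

// Hypothetical helper mirroring the slot check in the patch: a state at
// `slot` is finalized once the finalized checkpoint's epoch start slot
// has caught up to it.
fn is_finalized_slot(slot: u64, finalized_epoch: u64, slots_per_epoch: u64) -> bool {
    slot <= finalized_epoch * slots_per_epoch
}

fn main() {
    // Finalized checkpoint at epoch 2 with 32-slot epochs: slot 64 is
    // finalized (it is exactly the epoch start slot), slot 65 is not yet.
    assert!(is_finalized_slot(64, 2, 32));
    assert!(!is_finalized_slot(65, 2, 32));

    let resp = ExecutionOptimisticFinalizedResponse {
        execution_optimistic: Some(false),
        finalized: Some(true),
        data: "0xabc",
    };
    // Prints: {"execution_optimistic":false,"finalized":true,"data":"0xabc"}
    println!("{}", serde_json::to_string(&resp).unwrap());
}

Per the `StateId::root` hunks below, `Genesis` is always reported finalized, `Head` and `Justified` never are, and lookups by state root consult the chain's `is_finalized_state` directly.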
diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 673ead1f211..1a5d5175bc2 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -3,28 +3,43 @@ use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, }; +use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, - Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, + FullPayload, Hash256, SignedBeaconBlock, }; use warp::Rejection; +pub enum ProvenancedBlock { + /// The payload was built using a local EE. + Local(Arc>>), + /// The payload was build using a remote builder (e.g., via a mev-boost + /// compatible relay). + Builder(Arc>>), +} + /// Handles a request from the HTTP API for full blocks. pub async fn publish_block( block_root: Option, - block: Arc>, + provenanced_block: ProvenancedBlock, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, ) -> Result<(), Rejection> { let seen_timestamp = timestamp_now(); + let (block, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block) => (block, true), + ProvenancedBlock::Builder(block) => (block, false), + }; + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); debug!( log, @@ -38,10 +53,6 @@ pub async fn publish_block( let message = PubsubMessage::BeaconBlock(block.clone()); crate::publish_pubsub_message(network_tx, message)?; - // Determine the delay after the start of the slot, register it with metrics. - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - let block_root = block_root.unwrap_or_else(|| block.canonical_root()); match chain @@ -75,31 +86,11 @@ pub async fn publish_block( // head. chain.recompute_head_at_current_slot().await; - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let delayed_threshold = too_late_threshold / 2; - if delay >= too_late_threshold { - error!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= delayed_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) + // Only perform late-block logging here if the block is local. For + // blocks built with builders we consider the broadcast time to be + // when the blinded block is published to the builder. 
+ if is_locally_built_block { + late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) } Ok(()) @@ -147,14 +138,7 @@ pub async fn publish_blinded_block( ) -> Result<(), Rejection> { let block_root = block.canonical_root(); let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; - publish_block::( - Some(block_root), - Arc::new(full_block), - chain, - network_tx, - log, - ) - .await + publish_block::(Some(block_root), full_block, chain, network_tx, log).await } /// Deconstruct the given blinded block, and construct a full block. This attempts to use the @@ -165,15 +149,15 @@ async fn reconstruct_block( block_root: Hash256, block: SignedBeaconBlock>, log: Logger, -) -> Result>, Rejection> { - let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { +) -> Result, Rejection> { + let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) })?; // If the execution block hash is zero, use an empty payload. let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - FullPayload::default_at_fork( + let payload = FullPayload::default_at_fork( chain .spec .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), @@ -183,15 +167,30 @@ async fn reconstruct_block( "Default payload construction error: {e:?}" )) })? - .into() - // If we already have an execution payload with this transactions root cached, use it. + .into(); + ProvenancedPayload::Local(payload) + // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) { info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash()); - cached_payload - // Otherwise, this means we are attempting a blind block proposal. + ProvenancedPayload::Local(cached_payload) + // Otherwise, this means we are attempting a blind block proposal. } else { + // Perform the logging for late blocks when we publish to the + // builder, rather than when we publish to the network. This helps + // prevent false positive logs when the builder publishes to the P2P + // network significantly earlier than when they return the block to + // us. + late_block_logging( + &chain, + timestamp_now(), + block.message(), + block_root, + "builder", + &log, + ); + let full_payload = el .propose_blinded_beacon_block(block_root, &block) .await @@ -202,7 +201,7 @@ async fn reconstruct_block( )) })?; info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash()); - full_payload + ProvenancedPayload::Builder(full_payload) }; Some(full_payload) @@ -210,7 +209,71 @@ async fn reconstruct_block( None }; - block.try_into_full_block(full_payload).ok_or_else(|| { + match full_payload_opt { + // A block without a payload is pre-merge and we consider it locally + // built. 
+ None => block + .try_into_full_block(None) + .map(Arc::new) + .map(ProvenancedBlock::Local), + Some(ProvenancedPayload::Local(full_payload)) => block + .try_into_full_block(Some(full_payload)) + .map(Arc::new) + .map(ProvenancedBlock::Local), + Some(ProvenancedPayload::Builder(full_payload)) => block + .try_into_full_block(Some(full_payload)) + .map(Arc::new) + .map(ProvenancedBlock::Builder), + } + .ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) }) } + +/// If the `seen_timestamp` is some time after the start of the slot for +/// `block`, create some logs to indicate that the block was published late. +fn late_block_logging>( + chain: &BeaconChain, + seen_timestamp: Duration, + block: BeaconBlockRef, + root: Hash256, + provenance: &str, + log: &Logger, +) { + let delay = get_block_delay_ms(seen_timestamp, block, &chain.slot_clock); + + metrics::observe_timer_vec( + &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, + &[provenance], + delay, + ); + + // Perform some logging to inform users if their blocks are being produced + // late. + // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let delayed_threshold = too_late_threshold / 2; + if delay >= too_late_threshold { + error!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "provenance" => provenance, + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= delayed_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "provenance" => provenance, + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } +} diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index b3c90d08a4d..de7e5eb7d3b 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -10,8 +10,8 @@ use warp_utils::reject::beacon_chain_error; pub fn compute_beacon_block_rewards( chain: Arc>, block_id: BlockId, -) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(StandardBlockReward, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let block_ref = block.message(); @@ -23,5 +23,5 @@ pub fn compute_beacon_block_rewards( .compute_beacon_block_reward(block_ref, block_root, &mut state) .map_err(beacon_chain_error)?; - Ok((rewards, execution_optimistic)) + Ok((rewards, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 44354217bc4..9e4aadef17e 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -10,6 +10,9 @@ use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; #[derive(Debug)] pub struct StateId(pub CoreStateId); +// More clarity when returning if the state is finalized or not in the root function. 
+type Finalized = bool; + impl StateId { pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) @@ -19,8 +22,8 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { - let (slot, execution_optimistic) = match &self.0 { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { + let (slot, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -29,24 +32,36 @@ impl StateId { return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), + false, )); } - CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false, true)), CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( *slot, chain .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?, + *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), ), CoreStateId::Root(root) => { if let Some(hot_summary) = chain @@ -61,7 +76,10 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + let finalized = chain + .is_finalized_state(root, hot_summary.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) @@ -77,7 +95,7 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( "beacon state for state root {}", @@ -94,7 +112,7 @@ impl StateId { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; - Ok((root, execution_optimistic)) + Ok((root, execution_optimistic, finalized)) } /// Return the `fork` field of the state identified by `self`. @@ -103,9 +121,25 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result<(Fork, bool), warp::Rejection> { - self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { - Ok((state.fork(), execution_optimistic)) - }) + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, _finalized| Ok((state.fork(), execution_optimistic)), + ) + } + + /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. + /// Also returns the `finalized` value of the state. 
+ pub fn fork_and_execution_optimistic_and_finalized( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool, bool), warp::Rejection> { + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, finalized| { + Ok((state.fork(), execution_optimistic, finalized)) + }, + ) } /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. @@ -121,8 +155,8 @@ impl StateId { pub fn state( &self, chain: &BeaconChain, - ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { - let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + ) -> Result<(BeaconState, ExecutionOptimistic, Finalized), warp::Rejection> { + let ((state_root, execution_optimistic, finalized), slot_opt) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -134,6 +168,7 @@ impl StateId { .beacon_state .clone_with_only_committee_caches(), execution_status.is_optimistic_or_invalid(), + false, )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), @@ -152,24 +187,25 @@ impl StateId { }) })?; - Ok((state, execution_optimistic)) + Ok((state, execution_optimistic, finalized)) } /// Map a function across the `BeaconState` identified by `self`. /// - /// The optimistic status of the requested state is also provided to the `func` closure. + /// The optimistic and finalization status of the requested state is also provided to the `func` + /// closure. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. - pub fn map_state_and_execution_optimistic( + pub fn map_state_and_execution_optimistic_and_finalized( &self, chain: &BeaconChain, func: F, ) -> Result where - F: Fn(&BeaconState, bool) -> Result, + F: Fn(&BeaconState, bool, bool) -> Result, { - let (state, execution_optimistic) = match &self.0 { + let (state, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (head, execution_status) = chain .canonical_head @@ -178,12 +214,13 @@ impl StateId { return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), + false, ); } _ => self.state(chain)?, }; - func(&state, execution_optimistic) + func(&state, execution_optimistic, finalized) } } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index cefa98db415..68a06b1ce8c 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -13,8 +13,8 @@ pub fn compute_sync_committee_rewards( block_id: BlockId, validators: Vec, log: Logger, -) -> Result<(Option>, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(Option>, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let mut state = get_state_before_applying_block(chain.clone(), &block)?; @@ -44,7 +44,7 @@ pub fn compute_sync_committee_rewards( ) }; - Ok((data, execution_optimistic)) + Ok((data, execution_optimistic, finalized)) } pub fn get_state_before_applying_block( diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/src/test_utils.rs similarity index 95% rename from beacon_node/http_api/tests/common.rs rename to beacon_node/http_api/src/test_utils.rs index b12583ae02c..9880a8ca617 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,3 +1,4 
@@ +use crate::{Config, Context}; use beacon_chain::{ test_utils::{ BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, @@ -6,7 +7,6 @@ use beacon_chain::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; -use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, libp2p::{ @@ -130,7 +130,7 @@ pub async fn create_api_server( log: Logger, ) -> ApiServer> { // Get a random unused port. - let port = unused_port::unused_tcp_port().unwrap(); + let port = unused_port::unused_tcp4_port().unwrap(); create_api_server_on_port(chain, log, port).await } @@ -151,10 +151,11 @@ pub async fn create_api_server_on_port( let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), - TCP_PORT, - UDP_PORT, + Some(TCP_PORT), + None, meta_data, vec![], + false, &log, )); @@ -182,7 +183,7 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let context = Arc::new(Context { + let ctx = Arc::new(Context { config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -193,20 +194,20 @@ pub async fn create_api_server_on_port( data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, - chain: Some(chain.clone()), + chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), sse_logging_components: None, log, }); - let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); let server_shutdown = async { // It's not really interesting why this triggered, just that it happened. let _ = shutdown_rx.await; }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); ApiServer { server, diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index e8280a796a3..616745dbefe 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -75,15 +75,15 @@ pub fn get_validator_count( #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoRequestData { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] indices: Vec, } #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoValues { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] total_balance: u64, } @@ -165,6 +165,7 @@ pub struct ValidatorMetrics { attestation_target_hits: u64, attestation_target_misses: u64, attestation_target_hit_percentage: f64, + latest_attestation_inclusion_distance: u64, } #[derive(PartialEq, Serialize, Deserialize)] @@ -210,6 +211,8 @@ pub fn post_validator_monitor_metrics( let attestation_head_misses = val_metrics.attestation_head_misses; let attestation_target_hits = val_metrics.attestation_target_hits; let attestation_target_misses = val_metrics.attestation_target_misses; + let latest_attestation_inclusion_distance = + val_metrics.latest_attestation_inclusion_distance; drop(val_metrics); let attestations = attestation_hits + attestation_misses; @@ -242,6 +245,7 @@ pub fn post_validator_monitor_metrics( attestation_target_hits, attestation_target_misses, 
attestation_target_hit_percentage, + latest_attestation_inclusion_distance, }; validators.insert(id.clone(), metrics); diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 917e85e6493..f22ced1e693 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -18,7 +18,7 @@ fn end_of_epoch_state( let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); // The execution status is not returned, any functions which rely upon this method might return // optimistic information without explicitly declaring so. - let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + let (state, _execution_status, _finalized) = StateId::from_slot(target_slot).state(chain)?; Ok(state) } diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index e7fd8910b18..e01ff982201 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,8 @@ +use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ - ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, -}; +use types::{ForkName, ForkVersionedResponse, InconsistentFork}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); @@ -27,12 +26,13 @@ pub fn fork_versioned_response( }) } -pub fn execution_optimistic_fork_versioned_response( +pub fn execution_optimistic_finalized_fork_versioned_response( endpoint_version: EndpointVersion, fork_name: ForkName, execution_optimistic: bool, + finalized: bool, data: T, -) -> Result, warp::reject::Rejection> { +) -> Result, warp::reject::Rejection> { let fork_name = if endpoint_version == V1 { None } else if endpoint_version == V2 { @@ -40,9 +40,10 @@ pub fn execution_optimistic_fork_versioned_response( } else { return Err(unsupported_version_rejection(endpoint_version)); }; - Ok(ExecutionOptimisticForkVersionedResponse { + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6144123565e..8a3ba887b39 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,11 +1,11 @@ //! Tests for API behaviour across fork boundaries. -use crate::common::*; use beacon_chain::{ test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use http_api::test_utils::*; use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 7db1b22d67e..da92419744e 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,11 +1,11 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` -use crate::common::*; use beacon_chain::{ - chain_config::ReOrgThreshold, + chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::{ @@ -110,6 +110,8 @@ pub struct ReOrgTest { misprediction: bool, /// Whether to expect withdrawals to change on epoch boundaries. expect_withdrawals_change_on_epoch: bool, + /// Epoch offsets to avoid proposing reorg blocks at. + disallowed_offsets: Vec, } impl Default for ReOrgTest { @@ -127,6 +129,7 @@ impl Default for ReOrgTest { should_re_org: true, misprediction: false, expect_withdrawals_change_on_epoch: false, + disallowed_offsets: vec![], } } } @@ -238,6 +241,32 @@ pub async fn proposer_boost_re_org_head_distance() { .await; } +// Check that a re-org at a disallowed offset fails. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset() { + let offset = 4; + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets: vec![offset], + should_re_org: false, + ..Default::default() + }) + .await; +} + +// Check that a re-org at the *only* allowed offset succeeds. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset_exact() { + let offset = 4; + let disallowed_offsets = (0..E::slots_per_epoch()).filter(|o| *o != offset).collect(); + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets, + ..Default::default() + }) + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_very_unhealthy() { proposer_boost_re_org_test(ReOrgTest { @@ -286,6 +315,7 @@ pub async fn proposer_boost_re_org_test( should_re_org, misprediction, expect_withdrawals_change_on_epoch, + disallowed_offsets, }: ReOrgTest, ) { assert!(head_slot > 0); @@ -320,6 +350,9 @@ pub async fn proposer_boost_re_org_test( .proposer_re_org_max_epochs_since_finalization(Epoch::new( max_epochs_since_finalization, )) + .proposer_re_org_disallowed_offsets( + DisallowedReOrgOffsets::new::(disallowed_offsets).unwrap(), + ) })), ) .await; diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 88e0032ecde..342b72cc7de 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,6 +1,5 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. 
-pub mod common; pub mod fork_tests; pub mod interactive_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6424d73eb5d..a54f17e96f6 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,3 @@ -use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -8,7 +7,7 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, + types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::TestingBuilder; @@ -18,7 +17,10 @@ use execution_layer::test_utils::{ }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{BlockId, StateId}; +use http_api::{ + test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + BlockId, StateId, +}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkReceivers; use proto_array::ExecutionStatus; @@ -112,7 +114,7 @@ impl ApiTester { pub async fn new_from_config(config: ApiTesterConfig) -> Self { // Get a random unused port let spec = config.spec; - let port = unused_port::unused_tcp_port().unwrap(); + let port = unused_port::unused_tcp4_port().unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let harness = Arc::new( @@ -462,6 +464,264 @@ impl ApiTester { self } + // finalization tests + pub async fn test_beacon_states_root_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_root(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_fork(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_headers_block_id_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_headers_block_id(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. 
+ if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blinded_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_debug_beacon_states_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_debug_beacon_states::(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + pub async fn test_beacon_states_root(self) -> Self { for state_id in self.interesting_state_ids() { let result = self @@ -474,7 +734,7 @@ impl ApiTester { let expected = state_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -508,15 +768,13 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = - state_id - .state(&self.chain) - .ok() - .map(|(state, _execution_optimistic)| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = state_id.state(&self.chain).ok().map( + |(state, _execution_optimistic, _finalized)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + ); assert_eq!(result, expected, "{:?}", state_id); } @@ -529,7 +787,9 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some((state, _execution_optimistic)) => state.validators().clone().into(), + Some((state, _execution_optimistic, _finalized)) => { + state.validators().clone().into() + } None => vec![], }; let validator_index_ids = validator_indices @@ -568,7 +828,7 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = state_opt.map(|(state, _execution_optimistic)| { + let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -598,7 +858,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -688,7 +948,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -743,7 +1003,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self @@ -790,7 +1050,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let 
result = self @@ -900,7 +1160,7 @@ impl ApiTester { let block_root_opt = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { @@ -914,7 +1174,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if block_opt.is_none() && result.is_none() { continue; @@ -960,7 +1220,7 @@ impl ApiTester { let expected = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); @@ -1007,7 +1267,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1091,7 +1351,7 @@ impl ApiTester { let expected = block_id .blinded_block(&self.chain) .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1172,7 +1432,7 @@ impl ApiTester { .map(|res| res.data); let expected = block_id.full_block(&self.chain).await.ok().map( - |(block, _execution_optimistic)| { + |(block, _execution_optimistic, _finalized)| { block.message().body().attestations().clone().into() }, ); @@ -1593,7 +1853,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1615,21 +1875,6 @@ impl ApiTester { .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); - // Check legacy v1 API. - let result_v1 = self - .client - .get_debug_beacon_states_v1(state_id.0) - .await - .unwrap(); - - if let (Some(json), Some(expected)) = (&result_v1, &expected) { - assert_eq!(json.version, None); - assert_eq!(json.data, *expected, "{:?}", state_id); - } else { - assert_eq!(result_v1, None); - assert_eq!(expected, None); - } - // Check that version headers are provided. 
let url = self .client @@ -1679,6 +1924,59 @@ impl ApiTester { self } + pub async fn test_get_debug_fork_choice(self) -> Self { + let result = self.client.get_debug_fork_choice().await.unwrap(); + + let beacon_fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + + let expected_proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + assert_eq!( + result.justified_checkpoint, + expected_proto_array.justified_checkpoint + ); + assert_eq!( + result.finalized_checkpoint, + expected_proto_array.finalized_checkpoint + ); + + let expected_fork_choice_nodes: Vec = expected_proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch), + finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect(); + + assert_eq!(result.fork_choice_nodes, expected_fork_choice_nodes); + + // need to drop beacon_fork_choice here, else borrow checker will complain + // that self cannot be moved out since beacon_fork_choice borrowed self.chain + // and might still live after self is moved out + drop(beacon_fork_choice); + self + } + fn validator_count(&self) -> usize { self.chain.head_snapshot().beacon_state.validators().len() } @@ -3604,7 +3902,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -4012,6 +4310,20 @@ async fn beacon_get() { .await .test_beacon_genesis() .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_fork_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized::() + .await + .test_beacon_blinded_blocks_finalized::() + .await + .test_debug_beacon_states_finalized() + .await .test_beacon_states_root() .await .test_beacon_states_fork() @@ -4148,6 +4460,8 @@ async fn debug_get() { .test_get_debug_beacon_states() .await .test_get_debug_beacon_heads() + .await + .test_get_debug_fork_choice() .await; } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index f956d35d040..c1b4d721742 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,16 +5,16 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.1.0", features = ["libp2p"] } +discv5 = { version = "0.2.2", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -eth2_ssz_types = "0.2.2" +ssz_types = "0.5.0" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = 
"0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } @@ -42,7 +42,7 @@ strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } -delay_map = "0.1.1" +delay_map = "0.3.0" void = "1" [dependencies.libp2p] diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 1e32315019c..f4b3b78d048 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,4 @@ +use crate::listen_addr::{ListenAddr, ListenAddress}; use crate::rpc::config::OutboundRateLimiterConfig; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; @@ -12,6 +13,7 @@ use libp2p::gossipsub::{ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use std::net::{Ipv4Addr, Ipv6Addr}; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -57,24 +59,24 @@ pub struct Config { /// Data directory where node's keyfile is stored pub network_dir: PathBuf, - /// IP address to listen on. - pub listen_address: std::net::IpAddr, - - /// The TCP port that libp2p listens on. - pub libp2p_port: u16, - - /// UDP port that discovery listens on. - pub discovery_port: u16, + /// IP addresses to listen on. + listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. - pub enr_address: Option, + pub enr_address: (Option, Option), + + /// The udp4 port to broadcast to peers in order to reach back for discovery. + pub enr_udp4_port: Option, + + /// The tcp4 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp4_port: Option, - /// The udp port to broadcast to peers in order to reach back for discovery. - pub enr_udp_port: Option, + /// The udp6 port to broadcast to peers in order to reach back for discovery. + pub enr_udp6_port: Option, - /// The tcp port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp_port: Option, + /// The tcp6 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp6_port: Option, /// Target number of connected peers. pub target_peers: usize, @@ -99,6 +101,9 @@ pub struct Config { /// List of trusted libp2p nodes which are not scored. pub trusted_peers: Vec, + /// Disables peer scoring altogether. + pub disable_peer_scoring: bool, + /// Client version pub client_version: String, @@ -129,6 +134,9 @@ pub struct Config { /// List of extra topics to initially subscribe to as strings. pub topics: Vec, + /// Whether we are running a block proposer only node. + pub proposer_only: bool, + /// Whether metrics are enabled. pub metrics_enabled: bool, @@ -139,6 +147,105 @@ pub struct Config { pub outbound_rate_limiter_config: Option, } +impl Config { + /// Sets the listening address to use an ipv4 address. The discv5 ip_mode and table filter are + /// adjusted accordingly to ensure addresses that are present in the enr are globally + /// reachable. 
+ pub fn set_ipv4_listening_address(&mut self, addr: Ipv4Addr, tcp_port: u16, udp_port: u16) { + self.listen_addresses = ListenAddress::V4(ListenAddr { + addr, + udp_port, + tcp_port, + }); + self.discv5_config.ip_mode = discv5::IpMode::Ip4; + self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) + } + + /// Sets the listening address to use an ipv6 address. The discv5 ip_mode and table filter is + /// adjusted accordingly to ensure addresses that are present in the enr are globally + /// reachable. + pub fn set_ipv6_listening_address(&mut self, addr: Ipv6Addr, tcp_port: u16, udp_port: u16) { + self.listen_addresses = ListenAddress::V6(ListenAddr { + addr, + udp_port, + tcp_port, + }); + self.discv5_config.ip_mode = discv5::IpMode::Ip6 { + enable_mapped_addresses: false, + }; + self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) + } + + /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and + /// table filter is adjusted accordingly to ensure addresses that are present in the enr are + /// globally reachable. + pub fn set_ipv4_ipv6_listening_addresses( + &mut self, + v4_addr: Ipv4Addr, + tcp4_port: u16, + udp4_port: u16, + v6_addr: Ipv6Addr, + tcp6_port: u16, + udp6_port: u16, + ) { + self.listen_addresses = ListenAddress::DualStack( + ListenAddr { + addr: v4_addr, + udp_port: udp4_port, + tcp_port: tcp4_port, + }, + ListenAddr { + addr: v6_addr, + udp_port: udp6_port, + tcp_port: tcp6_port, + }, + ); + + self.discv5_config.ip_mode = discv5::IpMode::Ip6 { + enable_mapped_addresses: true, + }; + self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) { + (None, None) => false, + (None, Some(ip6)) => is_global_ipv6(ip6), + (Some(ip4), None) => is_global_ipv4(ip4), + (Some(ip4), Some(ip6)) => is_global_ipv4(ip4) && is_global_ipv6(ip6), + }; + } + + pub fn set_listening_addr(&mut self, listen_addr: ListenAddress) { + match listen_addr { + ListenAddress::V4(ListenAddr { + addr, + udp_port, + tcp_port, + }) => self.set_ipv4_listening_address(addr, tcp_port, udp_port), + ListenAddress::V6(ListenAddr { + addr, + udp_port, + tcp_port, + }) => self.set_ipv6_listening_address(addr, tcp_port, udp_port), + ListenAddress::DualStack( + ListenAddr { + addr: ip4addr, + udp_port: udp4_port, + tcp_port: tcp4_port, + }, + ListenAddr { + addr: ip6addr, + udp_port: udp6_port, + tcp_port: tcp6_port, + }, + ) => self.set_ipv4_ipv6_listening_addresses( + ip4addr, tcp4_port, udp4_port, ip6addr, tcp6_port, udp6_port, + ), + } + } + + pub fn listen_addrs(&self) -> &ListenAddress { + &self.listen_addresses + } +} + impl Default for Config { /// Generate a default network configuration. fn default() -> Self { @@ -183,7 +290,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global_ipv4(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); @@ -191,12 +298,16 @@ impl Default for Config { // NOTE: Some of these get overridden by the corresponding CLI default values. 
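
For orientation, a dependency-free sketch of the address model the setters above populate. The shapes mirror `ListenAddr` and `ListenAddress` from this PR; the discv5 `ip_mode` and table-filter side effects are omitted:

use std::net::{Ipv4Addr, Ipv6Addr};

// Mirrors the ListenAddr/ListenAddress shapes introduced in listen_addr.rs.
#[derive(Debug)]
struct ListenAddr<Ip> {
    addr: Ip,
    udp_port: u16,
    tcp_port: u16,
}

#[derive(Debug)]
enum ListenAddress {
    V4(ListenAddr<Ipv4Addr>),
    V6(ListenAddr<Ipv6Addr>),
    DualStack(ListenAddr<Ipv4Addr>, ListenAddr<Ipv6Addr>),
}

fn main() {
    // Equivalent in spirit to set_ipv4_ipv6_listening_addresses(...), which
    // also flips discv5 into IpMode::Ip6 { enable_mapped_addresses: true }.
    let listen = ListenAddress::DualStack(
        ListenAddr { addr: Ipv4Addr::UNSPECIFIED, udp_port: 9000, tcp_port: 9000 },
        ListenAddr { addr: Ipv6Addr::UNSPECIFIED, udp_port: 9090, tcp_port: 9090 },
    );
    println!("{listen:?}");
}
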
Config { network_dir, - listen_address: "0.0.0.0".parse().expect("valid ip address"), - libp2p_port: 9000, - discovery_port: 9000, - enr_address: None, - enr_udp_port: None, - enr_tcp_port: None, + listen_addresses: ListenAddress::V4(ListenAddr { + addr: Ipv4Addr::UNSPECIFIED, + udp_port: 9000, + tcp_port: 9000, + }), + enr_address: (None, None), + enr_udp4_port: None, + enr_tcp4_port: None, + enr_udp6_port: None, + enr_tcp6_port: None, target_peers: 50, gs_config, discv5_config, @@ -204,6 +315,7 @@ impl Default for Config { boot_nodes_multiaddr: vec![], libp2p_nodes: vec![], trusted_peers: vec![], + disable_peer_scoring: false, client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, @@ -213,6 +325,7 @@ impl Default for Config { import_all_attestations: false, shutdown_after_sync: false, topics: Vec::new(), + proposer_only: false, metrics_enabled: false, enable_light_client_server: false, outbound_rate_limiter_config: None, @@ -361,7 +474,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos /// Helper function to determine if the IpAddr is a global address or not. The `is_global()` /// function is not yet stable on IpAddr. #[allow(clippy::nonminimal_bool)] -fn is_global(addr: &std::net::Ipv4Addr) -> bool { +fn is_global_ipv4(addr: &Ipv4Addr) -> bool { // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two // globally routable addresses in the 192.0.0.0/24 range. if u32::from_be_bytes(addr.octets()) == 0xc0000009 @@ -382,3 +495,60 @@ fn is_global(addr: &std::net::Ipv4Addr) -> bool { // Make sure the address is not in 0.0.0.0/8 && addr.octets()[0] != 0 } + +/// NOTE: Docs taken from https://doc.rust-lang.org/stable/std/net/struct.Ipv6Addr.html#method.is_global +/// +/// Returns true if the address appears to be globally reachable as specified by the IANA IPv6 +/// Special-Purpose Address Registry. Whether or not an address is practically reachable will +/// depend on your network configuration. +/// +/// Most IPv6 addresses are globally reachable; unless they are specifically defined as not +/// globally reachable. +/// +/// Non-exhaustive list of notable addresses that are not globally reachable: +/// +/// - The unspecified address (is_unspecified) +/// - The loopback address (is_loopback) +/// - IPv4-mapped addresses +/// - Addresses reserved for benchmarking +/// - Addresses reserved for documentation (is_documentation) +/// - Unique local addresses (is_unique_local) +/// - Unicast addresses with link-local scope (is_unicast_link_local) +// TODO: replace with [`Ipv6Addr::is_global`] once +// [Ip](https://github.com/rust-lang/rust/issues/27709) is stable. +pub const fn is_global_ipv6(addr: &Ipv6Addr) -> bool { + const fn is_documentation(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] == 0x2001) && (addr.segments()[1] == 0xdb8) + } + const fn is_unique_local(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] & 0xfe00) == 0xfc00 + } + const fn is_unicast_link_local(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] & 0xffc0) == 0xfe80 + } + !(addr.is_unspecified() + || addr.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(addr.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. 
(`64:ff9b:1::/48`)
+        || matches!(addr.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
+        // Discard-Only Address Block (`100::/64`)
+        || matches!(addr.segments(), [0x100, 0, 0, 0, _, _, _, _])
+        // IETF Protocol Assignments (`2001::/23`)
+        || (matches!(addr.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
+            && !(
+                // Port Control Protocol Anycast (`2001:1::1`)
+                u128::from_be_bytes(addr.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
+                // Traversal Using Relays around NAT Anycast (`2001:1::2`)
+                || u128::from_be_bytes(addr.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
+                // AMT (`2001:3::/32`)
+                || matches!(addr.segments(), [0x2001, 3, _, _, _, _, _, _])
+                // AS112-v6 (`2001:4:112::/48`)
+                || matches!(addr.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
+                // ORCHIDv2 (`2001:20::/28`)
+                || matches!(addr.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F)
+            ))
+        || is_documentation(addr)
+        || is_unique_local(addr)
+        || is_unicast_link_local(addr))
+}
diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs
index 6b4b87a5f80..938e7cfa257 100644
--- a/beacon_node/lighthouse_network/src/discovery/enr.rs
+++ b/beacon_node/lighthouse_network/src/discovery/enr.rs
@@ -145,16 +145,39 @@ pub fn create_enr_builder_from_config<T: EthSpec>(
     enable_tcp: bool,
 ) -> EnrBuilder<CombinedKey> {
     let mut builder = EnrBuilder::new("v4");
-    if let Some(enr_address) = config.enr_address {
-        builder.ip(enr_address);
+    let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address;
+
+    if let Some(ip) = maybe_ipv4_address {
+        builder.ip4(*ip);
+    }
+
+    if let Some(ip) = maybe_ipv6_address {
+        builder.ip6(*ip);
+    }
+
+    if let Some(udp4_port) = config.enr_udp4_port {
+        builder.udp4(udp4_port);
     }
-    if let Some(udp_port) = config.enr_udp_port {
-        builder.udp4(udp_port);
+
+    if let Some(udp6_port) = config.enr_udp6_port {
+        builder.udp6(udp6_port);
     }
-    // we always give it our listening tcp port
+
     if enable_tcp {
-        let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port);
-        builder.tcp4(tcp_port);
+        // If the ENR port is not set, and we are listening over that ip version, use the listening port instead.
+        let tcp4_port = config
+            .enr_tcp4_port
+            .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port));
+        if let Some(tcp4_port) = tcp4_port {
+            builder.tcp4(tcp4_port);
+        }
+
+        let tcp6_port = config
+            .enr_tcp6_port
+            .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port));
+        if let Some(tcp6_port) = tcp6_port {
+            builder.tcp6(tcp6_port);
+        }
     }
     builder
 }
diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs
index c41844c2c59..13fdf8ed577 100644
--- a/beacon_node/lighthouse_network/src/discovery/mod.rs
+++ b/beacon_node/lighthouse_network/src/discovery/mod.rs
@@ -177,6 +177,13 @@ pub struct Discovery<TSpec: EthSpec> {
     /// always false.
     pub started: bool,
 
+    /// This keeps track of whether an external UDP port change should also indicate an internal
+    /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP
+    /// port is also our external TCP port. This assumption only holds if the user has not
+    /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the
+    /// second indicates tcp6.
+    update_tcp_port: (bool, bool),
+
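
A few test-style spot checks of the `is_global_ipv6` filter completed above; since the function is `pub`, these could sit in a unit test beside it (the sample addresses are illustrative):

#[cfg(test)]
mod is_global_ipv6_tests {
    use super::is_global_ipv6;
    use std::net::Ipv6Addr;

    #[test]
    fn filters_non_global_addresses() {
        // Loopback, unspecified, documentation, unique-local and link-local
        // ranges are all rejected by the filter.
        assert!(!is_global_ipv6(&Ipv6Addr::LOCALHOST));
        assert!(!is_global_ipv6(&Ipv6Addr::UNSPECIFIED));
        assert!(!is_global_ipv6(&"2001:db8::1".parse().unwrap()));
        assert!(!is_global_ipv6(&"fd00::1".parse().unwrap()));
        assert!(!is_global_ipv6(&"fe80::1".parse().unwrap()));
        // An ordinary globally-routed unicast address passes.
        assert!(is_global_ipv6(&"2606:4700::1".parse().unwrap()));
    }
}

     /// Logger for the discovery behaviour.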
log: slog::Logger, } @@ -197,12 +204,18 @@ impl Discovery { }; let local_enr = network_globals.local_enr.read().clone(); + let local_node_id = local_enr.node_id(); info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() ); - - let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); + let listen_socket = match config.listen_addrs() { + crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(), + crate::listen_addr::ListenAddress::V6(v6_addr) => v6_addr.udp_socket_addr(), + crate::listen_addr::ListenAddress::DualStack(_v4_addr, v6_addr) => { + v6_addr.udp_socket_addr() + } + }; // convert the keypair into an ENR key let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?; @@ -212,6 +225,10 @@ impl Discovery { // Add bootnodes to routing table for bootnode_enr in config.boot_nodes_enr.clone() { + if bootnode_enr.node_id() == local_node_id { + // If we are a boot node, ignore adding it to the routing table + continue; + } debug!( log, "Adding node to routing table"; @@ -290,6 +307,11 @@ impl Discovery { } } + let update_tcp_port = ( + config.enr_tcp4_port.is_none(), + config.enr_tcp6_port.is_none(), + ); + Ok(Self { cached_enrs: LruCache::new(50), network_globals, @@ -299,6 +321,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, + update_tcp_port, log, enr_dir, }) @@ -1009,20 +1032,40 @@ impl NetworkBehaviour for Discovery { metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version // to disk. + + if (self.update_tcp_port.0 && socket_addr.is_ipv4()) + || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + { + // Update the TCP port in the ENR + self.discv5.update_local_enr_socket(socket_addr, true); + } let enr = self.discv5.local_enr(); enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; // A new UDP socket has been detected. // Build a multiaddr to report to libp2p - let mut address = Multiaddr::from(socket_addr.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. - address.push(Protocol::Tcp(self.network_globals.listen_port_tcp())); - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + let addr = match socket_addr.ip() { + IpAddr::V4(v4_addr) => { + self.network_globals.listen_port_tcp4().map(|tcp4_port| { + Multiaddr::from(v4_addr).with(Protocol::Tcp(tcp4_port)) + }) + } + IpAddr::V6(v6_addr) => { + self.network_globals.listen_port_tcp6().map(|tcp6_port| { + Multiaddr::from(v6_addr).with(Protocol::Tcp(tcp6_port)) + }) + } + }; + + if let Some(address) = addr { + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); + } } Discv5Event::EnrAdded { .. 
                }
                | Discv5Event::TalkRequest(_)
@@ -1087,7 +1130,6 @@ mod tests {
     use enr::EnrBuilder;
     use slog::{o, Drain};
     use types::{BitVector, MinimalEthSpec, SubnetId};
-    use unused_port::unused_udp_port;
 
     type E = MinimalEthSpec;
@@ -1105,23 +1147,22 @@ async fn build_discovery() -> Discovery<E> {
         let keypair = libp2p::identity::Keypair::generate_secp256k1();
-        let config = NetworkConfig {
-            discovery_port: unused_udp_port().unwrap(),
-            ..Default::default()
-        };
+        let mut config = NetworkConfig::default();
+        config.set_listening_addr(crate::ListenAddress::unused_v4_ports());
         let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap();
         let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default()).unwrap();
         let log = build_log(slog::Level::Debug, false);
         let globals = NetworkGlobals::new(
             enr,
-            9000,
-            9000,
+            Some(9000),
+            None,
             MetaData::V2(MetaDataV2 {
                 seq_number: 0,
                 attnets: Default::default(),
                 syncnets: Default::default(),
             }),
             vec![],
+            false,
             &log,
         );
         Discovery::new(&keypair, &config, Arc::new(globals), &log)
diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs
index be4da809cb2..3d539af3b28 100644
--- a/beacon_node/lighthouse_network/src/lib.rs
+++ b/beacon_node/lighthouse_network/src/lib.rs
@@ -10,12 +10,14 @@ pub mod service;
 #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 pub mod discovery;
+pub mod listen_addr;
 pub mod metrics;
 pub mod peer_manager;
 pub mod rpc;
 pub mod types;
 
 pub use config::gossip_max_size;
+pub use listen_addr::*;
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 use std::str::FromStr;
diff --git a/beacon_node/lighthouse_network/src/listen_addr.rs b/beacon_node/lighthouse_network/src/listen_addr.rs
new file mode 100644
index 00000000000..20d87d403cd
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/listen_addr.rs
@@ -0,0 +1,97 @@
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+use libp2p::{multiaddr::Protocol, Multiaddr};
+use serde::{Deserialize, Serialize};
+
+/// A listening address composed of an IP, a UDP port and a TCP port.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ListenAddr<Ip> {
+    pub addr: Ip,
+    pub udp_port: u16,
+    pub tcp_port: u16,
+}
+
+impl<Ip: Into<IpAddr> + Clone> ListenAddr<Ip> {
+    pub fn udp_socket_addr(&self) -> SocketAddr {
+        (self.addr.clone().into(), self.udp_port).into()
+    }
+
+    pub fn tcp_socket_addr(&self) -> SocketAddr {
+        (self.addr.clone().into(), self.tcp_port).into()
+    }
+}
+
+/// Types of listening addresses Lighthouse can accept.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ListenAddress {
+    V4(ListenAddr<Ipv4Addr>),
+    V6(ListenAddr<Ipv6Addr>),
+    DualStack(ListenAddr<Ipv4Addr>, ListenAddr<Ipv6Addr>),
+}
+
+impl ListenAddress {
+    /// Return the listening address over IPv4, if any.
+    pub fn v4(&self) -> Option<&ListenAddr<Ipv4Addr>> {
+        match self {
+            ListenAddress::V4(v4_addr) | ListenAddress::DualStack(v4_addr, _) => Some(v4_addr),
+            ListenAddress::V6(_) => None,
+        }
+    }
+
+    /// Return the listening address over IPv6, if any.
+    pub fn v6(&self) -> Option<&ListenAddr<Ipv6Addr>> {
+        match self {
+            ListenAddress::V6(v6_addr) | ListenAddress::DualStack(_, v6_addr) => Some(v6_addr),
+            ListenAddress::V4(_) => None,
+        }
+    }
+
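The `tcp_addresses` helper defined next leans on `Option<T>` being an iterator of zero or one items, so a dual-stack configuration yields two multiaddrs and a single-stack one yields one. A dependency-free illustration of that chaining (string stand-ins instead of real `Multiaddr` values):

fn main() {
    // Each IP version contributes zero or one address to the iterator.
    let v4: Option<&str> = Some("/ip4/0.0.0.0/tcp/9000");
    let v6: Option<&str> = None;

    let addrs: Vec<_> = v4.into_iter().chain(v6).collect();
    assert_eq!(addrs, vec!["/ip4/0.0.0.0/tcp/9000"]);
}

+    /// Returns the TCP addresses.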
+    pub fn tcp_addresses(&self) -> impl Iterator<Item = Multiaddr> + '_ {
+        let v4_multiaddr = self
+            .v4()
+            .map(|v4_addr| Multiaddr::from(v4_addr.addr).with(Protocol::Tcp(v4_addr.tcp_port)));
+        let v6_multiaddr = self
+            .v6()
+            .map(|v6_addr| Multiaddr::from(v6_addr.addr).with(Protocol::Tcp(v6_addr.tcp_port)));
+        v4_multiaddr.into_iter().chain(v6_multiaddr)
+    }
+
+    #[cfg(test)]
+    pub fn unused_v4_ports() -> Self {
+        ListenAddress::V4(ListenAddr {
+            addr: Ipv4Addr::UNSPECIFIED,
+            udp_port: unused_port::unused_udp4_port().unwrap(),
+            tcp_port: unused_port::unused_tcp4_port().unwrap(),
+        })
+    }
+
+    #[cfg(test)]
+    pub fn unused_v6_ports() -> Self {
+        ListenAddress::V6(ListenAddr {
+            addr: Ipv6Addr::UNSPECIFIED,
+            udp_port: unused_port::unused_udp6_port().unwrap(),
+            tcp_port: unused_port::unused_tcp6_port().unwrap(),
+        })
+    }
+}
+
+impl slog::KV for ListenAddress {
+    fn serialize(
+        &self,
+        _record: &slog::Record,
+        serializer: &mut dyn slog::Serializer,
+    ) -> slog::Result {
+        if let Some(v4_addr) = self.v4() {
+            serializer.emit_arguments("ip4_address", &format_args!("{}", v4_addr.addr))?;
+            serializer.emit_u16("udp4_port", v4_addr.udp_port)?;
+            serializer.emit_u16("tcp4_port", v4_addr.tcp_port)?;
+        }
+        if let Some(v6_addr) = self.v6() {
+            serializer.emit_arguments("ip6_address", &format_args!("{}", v6_addr.addr))?;
+            serializer.emit_u16("udp6_port", v6_addr.udp_port)?;
+            serializer.emit_u16("tcp6_port", v6_addr.tcp_port)?;
+        }
+        slog::Result::Ok(())
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs
index 2ee224d5e28..58cc9920126 100644
--- a/beacon_node/lighthouse_network/src/metrics.rs
+++ b/beacon_node/lighthouse_network/src/metrics.rs
@@ -159,7 +159,7 @@ pub fn check_nat() {
     if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 {
         return;
     }
-    if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) == 0
+    if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0
         || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64
     {
         inc_counter(&NAT_OPEN);
@@ -167,7 +167,8 @@ pub fn check_nat() {
 }
 
 pub fn scrape_discovery_metrics() {
-    let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics());
+    let metrics =
+        discv5::metrics::Metrics::from(discv5::Discv5::::raw_metrics());
     set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second);
     set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64);
     set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64);
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
index 03f6a746ac6..a461a12e530 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
@@ -13,7 +13,7 @@ use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult};
 use rand::seq::SliceRandom;
 use slog::{debug, error, trace, warn};
 use smallvec::SmallVec;
-use std::collections::VecDeque;
+use std::collections::BTreeMap;
 use std::{
     sync::Arc,
     time::{Duration, Instant},
@@ -77,7 +77,7 @@ pub struct PeerManager<TSpec: EthSpec> {
     /// The target number of peers we would like to connect to.
     target_peers: usize,
     /// Peers queued to be dialed.
-    peers_to_dial: VecDeque<(PeerId, Option<Enr>)>,
+    peers_to_dial: BTreeMap<PeerId, Option<Enr>>,
     /// The number of temporarily banned peers. This is used to prevent instantaneous
     /// reconnection.
     // NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected.
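
Replacing the `VecDeque` with a `BTreeMap` both deduplicates repeated dial requests for the same peer (an insert overwrites the previous entry) and makes `pop_first()`, used in `network_behaviour.rs` below, hand out peers in deterministic key order. A dependency-free sketch with string stand-ins for `PeerId` and `Enr`:

use std::collections::BTreeMap;

fn main() {
    let mut peers_to_dial: BTreeMap<&str, Option<&str>> = BTreeMap::new();
    peers_to_dial.insert("peer-b", None);
    peers_to_dial.insert("peer-a", Some("enr-a"));
    peers_to_dial.insert("peer-b", Some("enr-b")); // overwrites, no duplicate dial

    // pop_first() drains entries in key order: peer-a, then peer-b.
    while let Some((peer_id, maybe_enr)) = peers_to_dial.pop_first() {
        println!("dialing {peer_id}, enr: {maybe_enr:?}");
    }
}
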
A @@ -290,11 +290,20 @@ impl PeerManager { // If a peer is being banned, this trumps any temporary ban the peer might be // under. We no longer track it in the temporary ban list. - self.temporary_banned_peers.raw_remove(peer_id); - - // Inform the Swarm to ban the peer - self.events - .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + if !self.temporary_banned_peers.raw_remove(peer_id) { + // If the peer is not already banned, inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + // If the peer was in the process of being un-banned, remove it (a rare race + // condition) + self.events.retain(|event| { + if let PeerManagerEvent::UnBanned(unbanned_peer_id, _) = event { + unbanned_peer_id != peer_id // Remove matching peer ids + } else { + true + } + }); + } } } } @@ -308,7 +317,7 @@ impl PeerManager { /// proves resource constraining, we should switch to multiaddr dialling here. #[allow(clippy::mutable_key_type)] pub fn peers_discovered(&mut self, results: HashMap>) -> Vec { - let mut to_dial_peers = Vec::new(); + let mut to_dial_peers = Vec::with_capacity(4); let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (peer_id, min_ttl) in results { @@ -398,7 +407,7 @@ impl PeerManager { // A peer is being dialed. pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option) { - self.peers_to_dial.push_back((*peer_id, enr)); + self.peers_to_dial.insert(*peer_id, enr); } /// Reports if a peer is banned or not. @@ -552,8 +561,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, - Protocol::MetaData => PeerAction::LowToleranceError, - Protocol::Status => PeerAction::LowToleranceError, + Protocol::MetaData => PeerAction::Fatal, + Protocol::Status => PeerAction::Fatal, } } RPCError::StreamTimeout => match direction { @@ -1185,6 +1194,18 @@ impl PeerManager { // Unban any peers that have served their temporary ban timeout self.unban_temporary_banned_peers(); + + // Maintains memory by shrinking mappings + self.shrink_mappings(); + } + + // Reduce memory footprint by routinely shrinking associating mappings. + fn shrink_mappings(&mut self) { + self.inbound_ping_peers.shrink_to(5); + self.outbound_ping_peers.shrink_to(5); + self.status_peers.shrink_to(5); + self.temporary_banned_peers.shrink_to_fit(); + self.sync_committee_subnets.shrink_to_fit(); } // Update metrics related to peer scoring. diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 21288473ec9..24de83a61da 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -89,7 +89,7 @@ impl NetworkBehaviour for PeerManager { self.events.shrink_to_fit(); } - if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_front() { + if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); let handler = self.new_handler(); return Poll::Ready(NetworkBehaviourAction::Dial { @@ -156,8 +156,10 @@ impl PeerManager { BanResult::BadScore => { // This is a faulty state error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Reban the peer + // Disconnect the peer. 
self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); + // Re-ban the peer to prevent repeated errors. + self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); return; } BanResult::BannedIp(ip_addr) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 70d3399d6ad..20870656883 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -41,12 +41,14 @@ pub struct PeerDB { disconnected_peers: usize, /// Counts banned peers in total and per ip banned_peers_count: BannedPeersCount, + /// Specifies if peer scoring is disabled. + disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, } impl PeerDB { - pub fn new(trusted_peers: Vec, log: &slog::Logger) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -56,6 +58,7 @@ impl PeerDB { log: log.clone(), disconnected_peers: 0, banned_peers_count: BannedPeersCount::default(), + disable_peer_scoring, peers, } } @@ -704,7 +707,11 @@ impl PeerDB { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); } - PeerInfo::default() + if self.disable_peer_scoring { + PeerInfo::trusted_peer_info() + } else { + PeerInfo::default() + } }); // Ban the peer if the score is not already low enough. @@ -1300,7 +1307,7 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], &log) + PeerDB::new(vec![], false, &log) } #[test] @@ -1999,7 +2006,7 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], &log); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2018,4 +2025,28 @@ mod tests { Score::max_score().score() ); } + + #[test] + fn test_disable_peer_scoring() { + let peer = PeerId::random(); + let log = build_log(slog::Level::Debug, false); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + + pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Check trusted status and score + assert!(pdb.peer_info(&peer).unwrap().is_trusted()); + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + + // Adding/Subtracting score should have no effect on a trusted peer + add_score(&mut pdb, &peer, -50.0); + + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8d7b22029d1..a8423e47b0b 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -344,13 +344,16 @@ impl ProtocolId { /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the /// beginning of the stream, else returns `false`. 
pub fn has_context_bytes(&self) -> bool { - if self.version == Version::V2 { - match self.message_name { - Protocol::BlocksByRange | Protocol::BlocksByRoot => return true, - _ => return false, - } + match self.message_name { + Protocol::BlocksByRange | Protocol::BlocksByRoot => match self.version { + Version::V2 => true, + Version::V1 => false, + }, + Protocol::LightClientBootstrap => match self.version { + Version::V2 | Version::V1 => true, + }, + Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false, } - false } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e20b86e546a..f815e3bd36b 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -163,14 +163,15 @@ impl Network { let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( enr, - config.libp2p_port, - config.discovery_port, + config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port), + config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port), meta_data, config .trusted_peers .iter() .map(|x| PeerId::from(x.clone())) .collect(), + config.disable_peer_scoring, &log, ); Arc::new(globals) @@ -388,36 +389,26 @@ impl Network { async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - let discovery_string = if config.disable_discovery { - "None".into() - } else { - config.discovery_port.to_string() - }; - - debug!(self.log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); - - let listen_multiaddr = { - let mut m = Multiaddr::from(config.listen_address); - m.push(MProtocol::Tcp(config.libp2p_port)); - m - }; - - match self.swarm.listen_on(listen_multiaddr.clone()) { - Ok(_) => { - let mut log_address = listen_multiaddr; - log_address.push(MProtocol::P2p(enr.peer_id().into())); - info!(self.log, "Listening established"; "address" => %log_address); - } - Err(err) => { - crit!( - self.log, - "Unable to listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, - ); - return Err("Libp2p was unable to listen on the given listen address.".into()); - } - }; + debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery); + + for listen_multiaddr in config.listen_addrs().tcp_addresses() { + match self.swarm.listen_on(listen_multiaddr.clone()) { + Ok(_) => { + let mut log_address = listen_multiaddr; + log_address.push(MProtocol::P2p(enr.peer_id().into())); + info!(self.log, "Listening established"; "address" => %log_address); + } + Err(err) => { + crit!( + self.log, + "Unable to listen on libp2p address"; + "error" => ?err, + "listen_multiaddr" => %listen_multiaddr, + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } + }; + } // helper closure for dialing peers let mut dial = |mut multiaddr: Multiaddr| { @@ -1129,7 +1120,7 @@ impl Network { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, - PeerAction::LowToleranceError, + PeerAction::Fatal, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), 
"does_not_support_gossipsub", diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index aadd13a236b..43e8ebd76a5 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -7,7 +7,6 @@ use crate::EnrExt; use crate::{Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; -use std::sync::atomic::{AtomicU16, Ordering}; use types::EthSpec; pub struct NetworkGlobals { @@ -17,10 +16,10 @@ pub struct NetworkGlobals { pub peer_id: RwLock, /// Listening multiaddrs. pub listen_multiaddrs: RwLock>, - /// The TCP port that the libp2p service is listening on - pub listen_port_tcp: AtomicU16, - /// The UDP port that the discovery service is listening on - pub listen_port_udp: AtomicU16, + /// The TCP port that the libp2p service is listening on over Ipv4. + listen_port_tcp4: Option, + /// The TCP port that the libp2p service is listening on over Ipv6. + listen_port_tcp6: Option, /// The collection of known peers. pub peers: RwLock>, // The local meta data of our node. @@ -36,20 +35,21 @@ pub struct NetworkGlobals { impl NetworkGlobals { pub fn new( enr: Enr, - tcp_port: u16, - udp_port: u16, + listen_port_tcp4: Option, + listen_port_tcp6: Option, local_metadata: MetaData, trusted_peers: Vec, + disable_peer_scoring: bool, log: &slog::Logger, ) -> Self { NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), - listen_port_tcp: AtomicU16::new(tcp_port), - listen_port_udp: AtomicU16::new(udp_port), + listen_port_tcp4, + listen_port_tcp6, local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, log)), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), @@ -73,13 +73,13 @@ impl NetworkGlobals { } /// Returns the libp2p TCP port that this node has been configured to listen on. - pub fn listen_port_tcp(&self) -> u16 { - self.listen_port_tcp.load(Ordering::Relaxed) + pub fn listen_port_tcp4(&self) -> Option { + self.listen_port_tcp4 } /// Returns the UDP discovery port that this node has been configured to listen on. - pub fn listen_port_udp(&self) -> u16 { - self.listen_port_udp.load(Ordering::Relaxed) + pub fn listen_port_tcp6(&self) -> Option { + self.listen_port_tcp6 } /// Returns the number of libp2p connected peers. 
@@ -137,14 +137,15 @@ impl NetworkGlobals { let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, - 9000, - 9000, + Some(9000), + None, MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), }), vec![], + false, log, ) } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index dfceb6c4c6a..d44f20c0806 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -13,7 +13,7 @@ use tokio::runtime::Runtime; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, }; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; type E = MinimalEthSpec; type ReqId = usize; @@ -75,11 +75,9 @@ pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { .tempdir() .unwrap(); - config.libp2p_port = port; // tcp port - config.discovery_port = port; // udp port - config.enr_tcp_port = Some(port); - config.enr_udp_port = Some(port); - config.enr_address = Some("127.0.0.1".parse().unwrap()); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port); + config.enr_udp4_port = Some(port); + config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); // Reduce gossipsub heartbeat parameters @@ -97,7 +95,7 @@ pub async fn build_libp2p_instance( log: slog::Logger, fork_name: ForkName, ) -> Libp2pInstance { - let port = unused_tcp_port().unwrap(); + let port = unused_tcp4_port().unwrap(); let config = build_config(port, boot_nodes); // launch libp2p service diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 95d8a294c11..fbc45364aad 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -21,8 +21,8 @@ types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } @@ -43,7 +43,7 @@ if-addrs = "0.6.4" strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" -delay_map = "0.1.1" +delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } execution_layer = { path = "../execution_layer" } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 61e3367e2fc..96032052284 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -61,6 +61,7 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, @@ -77,7 +78,9 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; +use crate::beacon_processor::work_reprocessing_queue::{ + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, +}; pub use 
worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`. @@ -218,6 +221,7 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; +pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; @@ -738,6 +742,9 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { + WorkEvent::chain_segment(process_id, blocks) + } } } } @@ -893,6 +900,10 @@ impl Work { Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::ChainSegment { + process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, + .. + } => CHAIN_SEGMENT_BACKFILL, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, @@ -1054,23 +1065,23 @@ impl BeaconProcessor { FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + + let chain = match self.beacon_chain.upgrade() { + Some(chain) => chain, + // No need to proceed any further if the beacon chain has been dropped, the client + // is shutting down. + None => return, + }; + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = { - if let Some(chain) = self.beacon_chain.upgrade() { - spawn_reprocess_scheduler( - ready_work_tx, - &self.executor, - chain.slot_clock.clone(), - self.log.clone(), - ) - } else { - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. - return; - } - }; + let work_reprocessing_tx = spawn_reprocess_scheduler( + ready_work_tx, + &self.executor, + chain.slot_clock.clone(), + self.log.clone(), + ); let executor = self.executor.clone(); @@ -1083,12 +1094,55 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; + let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + loop { let work_event = match inbound_events.next().await { Some(InboundEvent::WorkerIdle) => { self.current_workers = self.current_workers.saturating_sub(1); None } + Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => { + match QueuedBackfillBatch::try_from(event) { + Ok(backfill_batch) => { + match work_reprocessing_tx + .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch)) + { + Err(e) => { + warn!( + self.log, + "Unable to queue backfill work event. 
Will try to process now."; + "error" => %e + ); + match e { + TrySendError::Full(reprocess_queue_message) + | TrySendError::Closed(reprocess_queue_message) => { + match reprocess_queue_message { + ReprocessQueueMessage::BackfillSync( + backfill_batch, + ) => Some(backfill_batch.into()), + other => { + crit!( + self.log, + "Unexpected queue message type"; + "message_type" => other.as_ref() + ); + // This is an unhandled exception, drop the message. + continue; + } + } + } + } + } + Ok(..) => { + // backfill work sent to "reprocessing" queue. Process the next event. + continue; + } + } + } + Err(event) => Some(event), + } + } Some(InboundEvent::WorkEvent(event)) | Some(InboundEvent::ReprocessingWork(event)) => Some(event), None => { diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index ea1a59e0d05..4b0a159eb4b 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,7 +9,7 @@ use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -23,8 +23,8 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, - SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -36,7 +36,6 @@ const SMALL_CHAIN: u64 = 2; const LONG_CHAIN: u64 = SLOTS_PER_EPOCH * 2; const TCP_PORT: u16 = 42; -const UDP_PORT: u16 = 42; const SEQ_NUMBER: u64 = 0; /// The default time to wait for `BeaconProcessor` events. @@ -71,6 +70,10 @@ impl Drop for TestRig { impl TestRig { pub async fn new(chain_length: u64) -> Self { + Self::new_with_chain_config(chain_length, ChainConfig::default()).await + } + + pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self { // This allows for testing voluntary exits without building out a massive chain. let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -79,6 +82,7 @@ impl TestRig { .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -177,10 +181,11 @@ impl TestRig { let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr, - TCP_PORT, - UDP_PORT, + Some(TCP_PORT), + None, meta_data, vec![], + false, &log, )); @@ -262,6 +267,14 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_backfill_batch(&self) { + let event = WorkEvent::chain_segment( + ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + Vec::default(), + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -874,3 +887,49 @@ async fn test_rpc_block_reprocessing() { // cache handle was dropped. 
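
The backfill branch above relies on `try_send` returning the rejected message by value inside tokio's `TrySendError::Full` / `TrySendError::Closed`, so a batch that cannot be queued is processed inline rather than dropped. The same by-value recovery pattern with std's bounded channel, whose second variant is named `Disconnected` rather than `Closed`:

use std::sync::mpsc::{sync_channel, TrySendError};

fn main() {
    let (tx, _rx) = sync_channel::<&str>(1);
    tx.try_send("first batch").unwrap();

    // The channel is now full, so the second send fails *with* the message.
    match tx.try_send("second batch") {
        Ok(()) => println!("queued"),
        Err(TrySendError::Full(msg)) | Err(TrySendError::Disconnected(msg)) => {
            println!("queue unavailable, processing {msg:?} immediately");
        }
    }
}
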
assert_eq!(next_block_root, rig.head_root()); } + +/// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals. +#[tokio::test] +async fn test_backfill_sync_processing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + // Note: to verify the exact event times in an integration test is not straight forward here + // (not straight forward to manipulate `TestingSlotClock` due to cloning of `SlotClock` in code) + // and makes the test very slow, hence timing calculation is unit tested separately in + // `work_reprocessing_queue`. + for _ in 0..1 { + rig.enqueue_backfill_batch(); + // ensure queued batch is not processed until later + rig.assert_no_events_for(Duration::from_millis(100)).await; + // A new batch should be processed within a slot. + rig.assert_event_journal_with_timeout( + &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + rig.chain.slot_clock.slot_duration(), + ) + .await; + } +} + +/// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. +#[tokio::test] +async fn test_backfill_sync_processing_rate_limiting_disabled() { + let chain_config = ChainConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await; + + for _ in 0..3 { + rig.enqueue_backfill_batch(); + } + + // ensure all batches are processed + rig.assert_event_journal_with_timeout( + &[ + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + ], + Duration::from_millis(100), + ) + .await; +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 8c568a7eefd..427be6d5138 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -11,21 +11,25 @@ //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. use super::MAX_SCHEDULED_WORK_QUEUE_LEN; +use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; +use itertools::Itertools; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; +use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; @@ -52,7 +56,7 @@ pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); /// For how long to queue rpc blocks before sending them back for reprocessing. -pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. 
The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but @@ -65,7 +69,21 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// How many light client updates we keep before new ones get dropped. const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; +// Process backfill batch 50%, 60%, 80% through each slot. +// +// Note: use caution to set these fractions in a way that won't cause panic-y +// arithmetic. +pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ + // One half: 6s on mainnet, 2.5s on Gnosis. + (1, 2), + // Three fifths: 7.2s on mainnet, 3s on Gnosis. + (3, 5), + // Four fifths: 9.6s on mainnet, 4s on Gnosis. + (4, 5), +]; + /// Messages that the scheduler can receive. +#[derive(AsRefStr)] pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), @@ -84,6 +102,8 @@ pub enum ReprocessQueueMessage { UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + /// A new backfill batch that needs to be scheduled for processing. + BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. @@ -93,6 +113,7 @@ pub enum ReadyWork { Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -144,6 +165,40 @@ pub struct QueuedRpcBlock { pub should_process: bool, } +/// A backfill batch work that has been queued for processing later. +#[derive(Clone)] +pub struct QueuedBackfillBatch { + pub process_id: ChainSegmentProcessId, + pub blocks: Vec>>, +} + +impl TryFrom> for QueuedBackfillBatch { + type Error = WorkEvent; + + fn try_from(event: WorkEvent) -> Result> { + match event { + WorkEvent { + work: + Work::ChainSegment { + process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), + blocks, + }, + .. + } => Ok(QueuedBackfillBatch { process_id, blocks }), + _ => Err(event), + } + } +} + +impl From> for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent::chain_segment( + queued_backfill_batch.process_id, + queued_backfill_batch.blocks, + ) + } +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. @@ -155,6 +210,8 @@ enum InboundEvent { ReadyAttestation(QueuedAttestationId), /// A light client update that is ready for re-processing. ReadyLightClientUpdate(QueuedLightClientUpdateId), + /// A backfill batch that was queued is ready for processing. + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -191,6 +248,8 @@ struct ReprocessQueue { queued_lc_updates: FnvHashMap, DelayKey)>, /// Light Client Updates per parent_root. 
awaiting_lc_updates_per_parent_root: HashMap>, + /// Queued backfill batches + queued_backfill_batches: Vec>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations @@ -200,6 +259,8 @@ struct ReprocessQueue { rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, + next_backfill_batch_event: Option>>, + slot_clock: Pin>, } pub type QueuedLightClientUpdateId = usize; @@ -287,6 +348,20 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() { + match next_backfill_batch_event.as_mut().poll(cx) { + Poll::Ready(_) => { + let maybe_batch = self.queued_backfill_batches.pop(); + self.recompute_next_backfill_batch_event(); + + if let Some(batch) = maybe_batch { + return Poll::Ready(Some(InboundEvent::ReadyBackfillSync(batch))); + } + } + Poll::Pending => (), + } + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -323,12 +398,15 @@ pub fn spawn_reprocess_scheduler( queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), awaiting_lc_updates_per_parent_root: HashMap::new(), + queued_backfill_batches: Vec::new(), next_attestation: 0, next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), + next_backfill_batch_event: None, + slot_clock: Box::pin(slot_clock.clone()), }; executor.spawn( @@ -443,7 +521,7 @@ impl ReprocessQueue { return; } - // Queue the block for 1/4th of a slot + // Queue the block for 1/3rd of a slot self.rpc_block_delay_queue .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); } @@ -573,6 +651,9 @@ impl ReprocessQueue { }) => { // Unqueue the attestations we have for this root, if any. if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&block_root) { + let mut sent_count = 0; + let mut failed_to_send_count = 0; + for id in queued_ids { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, @@ -597,10 +678,9 @@ impl ReprocessQueue { // Send the work. if self.ready_work_tx.try_send(work).is_err() { - error!( - log, - "Failed to send scheduled attestation"; - ); + failed_to_send_count += 1; + } else { + sent_count += 1; } } else { // There is a mismatch between the attestation ids registered for this @@ -613,6 +693,18 @@ impl ReprocessQueue { ); } } + + if failed_to_send_count > 0 { + error!( + log, + "Ignored scheduled attestation(s) for block"; + "hint" => "system may be overloaded", + "parent_root" => ?parent_root, + "block_root" => ?block_root, + "failed_count" => failed_to_send_count, + "sent_count" => sent_count, + ); + } } // Unqueue the light client optimistic updates we have for this root, if any. if let Some(queued_lc_id) = self @@ -665,6 +757,14 @@ impl ReprocessQueue { } } } + InboundEvent::Msg(BackfillSync(queued_backfill_batch)) => { + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + // only recompute if there is no `next_backfill_batch_event` already scheduled + if self.next_backfill_batch_event.is_none() { + self.recompute_next_backfill_batch_event(); + } + } // A block that was queued for later processing is now ready to be processed. 
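
The `(multiplier, divisor)` pairs in `BACKFILL_SCHEDULE_IN_SLOT` become in-slot offsets via the same `(slot_duration / divisor) * multiplier` arithmetic the queue uses. A standalone check of the mainnet numbers quoted in the comments above (12s slots):

use std::time::Duration;

const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [(1, 2), (3, 5), (4, 5)];

fn main() {
    let slot_duration = Duration::from_secs(12); // mainnet slot time

    for (multiplier, divisor) in BACKFILL_SCHEDULE_IN_SLOT {
        // Integer division on Duration cannot panic here since divisor > 0.
        let offset = (slot_duration / divisor) * multiplier;
        println!("backfill batch scheduled {offset:?} into the slot");
    }
    // Prints 6s, 7.2s and 9.6s, matching the comments above.
}
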
InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; @@ -727,7 +827,9 @@ impl ReprocessQueue { if self.ready_work_tx.try_send(work).is_err() { error!( log, - "Failed to send scheduled attestation"; + "Ignored scheduled attestation"; + "hint" => "system may be overloaded", + "beacon_block_root" => ?root ); } @@ -770,6 +872,33 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyBackfillSync(queued_backfill_batch) => { + let millis_from_slot_start = slot_clock + .millis_from_current_slot_start() + .map_or("null".to_string(), |duration| { + duration.as_millis().to_string() + }); + + debug!( + log, + "Sending scheduled backfill work"; + "millis_from_slot_start" => millis_from_slot_start + ); + + if self + .ready_work_tx + .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) + .is_err() + { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + } + } } metrics::set_gauge_vec( @@ -793,4 +922,95 @@ impl ReprocessQueue { self.lc_updates_delay_queue.len() as i64, ); } + + fn recompute_next_backfill_batch_event(&mut self) { + // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue + if !self.queued_backfill_batches.is_empty() { + self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ))); + } else { + self.next_backfill_batch_event = None + } + } + + /// Returns duration until the next scheduled processing time. The schedule ensure that backfill + /// processing is done in windows of time that aren't critical + fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + let slot_duration = slot_clock.slot_duration(); + slot_clock + .millis_from_current_slot_start() + .and_then(|duration_from_slot_start| { + BACKFILL_SCHEDULE_IN_SLOT + .into_iter() + // Convert fractions to seconds from slot start. + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier) + .find_or_first(|&event_duration_from_slot_start| { + event_duration_from_slot_start > duration_from_slot_start + }) + .map(|next_event_time| { + if duration_from_slot_start >= next_event_time { + // event is in the next slot, add duration to next slot + let duration_to_next_slot = slot_duration - duration_from_slot_start; + duration_to_next_slot + next_event_time + } else { + next_event_time - duration_from_slot_start + } + }) + }) + // If we can't read the slot clock, just wait another slot. 
+ .unwrap_or(slot_duration) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use slot_clock::TestingSlotClock; + use store::MemoryStore; + use types::MainnetEthSpec as E; + use types::Slot; + + type TestBeaconChainType = + Witness, E, MemoryStore, MemoryStore>; + + #[test] + fn backfill_processing_schedule_calculation() { + let slot_duration = Duration::from_secs(12); + let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), slot_duration); + let current_slot_start = slot_clock.start_of(Slot::new(100)).unwrap(); + slot_clock.set_current_time(current_slot_start); + + let event_times = BACKFILL_SCHEDULE_IN_SLOT + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier); + + for &event_duration_from_slot_start in event_times.iter() { + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + + let current_time = slot_clock.millis_from_current_slot_start().unwrap(); + + assert_eq!( + duration_to_next_event, + event_duration_from_slot_start - current_time + ); + + slot_clock.set_current_time(current_slot_start + event_duration_from_slot_start) + } + + // check for next event beyond the current slot + let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + assert_eq!( + duration_to_next_event, + duration_to_next_slot + event_times[0] + ); + } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index f2b1b3a26ba..1ec03ae954f 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -834,7 +834,6 @@ impl Worker { | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) - // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { warn!(self.log, "Could not verify block for gossip. Rejecting the block"; diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index afcc15280d3..81b163bf7ee 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -9,8 +9,8 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; -use std::sync::Arc; use task_executor::TaskExecutor; +use tokio_stream::StreamExt; use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -131,21 +131,25 @@ impl Worker { request_id: PeerRequestId, request: BlocksByRootRequest, ) { + let requested_blocks = request.block_roots.len(); + let mut block_stream = match self + .chain + .get_blocks_checking_early_attester_cache(request.block_roots.into(), &executor) + { + Ok(block_stream) => block_stream, + Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), + }; // Fetching blocks is async because it may have to hit the execution layer for payloads. 
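The hunk above replaces per-root `get_block_checking_early_attester_cache` awaits with a single result stream, which the spawned task below drains. A minimal sketch of that consumption pattern, using stand-in item types rather than the real `BeaconChain` stream:

    use tokio_stream::StreamExt;

    // Drains (root, lookup result) pairs, mirroring the
    // `while let Some((root, result)) = block_stream.next().await` loop below.
    async fn drain<S>(mut block_stream: S)
    where
        S: tokio_stream::Stream<Item = (u64, Result<Option<&'static str>, &'static str>)> + Unpin,
    {
        while let Some((root, result)) = block_stream.next().await {
            match result {
                Ok(Some(block)) => println!("respond with {block} for root {root}"),
                Ok(None) => println!("no block known for root {root}"),
                Err(e) => println!("lookup failed for root {root}: {e}"),
            }
        }
    }

    #[tokio::main]
    async fn main() {
        drain(tokio_stream::iter([(1u64, Ok(Some("block a"))), (2u64, Ok(None))])).await;
    }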
executor.spawn( async move { let mut send_block_count = 0; let mut send_response = true; - for root in request.block_roots.iter() { - match self - .chain - .get_block_checking_early_attester_cache(root) - .await - { + while let Some((root, result)) = block_stream.next().await { + match result.as_ref() { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(block)), + Response::BlocksByRoot(Some(block.clone())), request_id, ); send_block_count += 1; @@ -190,7 +194,7 @@ impl Worker { self.log, "Received BlocksByRoot Request"; "peer" => %peer_id, - "requested" => request.block_roots.len(), + "requested" => requested_blocks, "returned" => %send_block_count ); @@ -344,14 +348,19 @@ impl Worker { // remove all skip slots let block_roots = block_roots.into_iter().flatten().collect::>(); + let mut block_stream = match self.chain.get_blocks(block_roots, &executor) { + Ok(block_stream) => block_stream, + Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), + }; + // Fetching blocks is async because it may have to hit the execution layer for payloads. executor.spawn( async move { let mut blocks_sent = 0; let mut send_response = true; - for root in block_roots { - match self.chain.get_block(&root).await { + while let Some((root, result)) = block_stream.next().await { + match result.as_ref() { Ok(Some(block)) => { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending @@ -361,7 +370,7 @@ impl Worker { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Arc::new(block))), + response: Response::BlocksByRange(Some(block.clone())), id: request_id, }); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 6e6e6815504..ca2095348ae 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -9,12 +9,15 @@ use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ + observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -83,6 +86,66 @@ impl Worker { return; } }; + + // Returns `true` if the time now is after the 4s attestation deadline. + let block_is_late = SystemTime::now() + .duration_since(UNIX_EPOCH) + // If we can't read the system time clock then indicate that the + // block is late (and therefore should *not* be requeued). This + // avoids infinite loops. + .map_or(true, |now| { + get_block_delay_ms(now, block.message(), &self.chain.slot_clock) + > self.chain.slot_clock.unagg_attestation_production_delay() + }); + + // Checks if a block from this proposer is already known. + let proposal_already_known = || { + match self + .chain + .observed_block_producers + .read() + .proposer_has_been_observed(block.message()) + { + Ok(is_observed) => is_observed, + // Both of these blocks will be rejected, so reject them now rather + // than re-queuing them. 
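The `block_is_late` and `proposal_already_known` checks above combine into the re-queue decision below. A rough standalone sketch of the timing half, assuming the mainnet 12-second slot (the helper is a stand-in, not the `SlotClock` API):

    use std::time::Duration;

    // Sketch: a block counts as "late" once the unaggregated-attestation
    // deadline (one third of a slot, i.e. 4s on mainnet) has passed.
    fn block_is_late(delay_since_slot_start: Duration, slot_duration: Duration) -> bool {
        delay_since_slot_start > slot_duration / 3
    }

    fn main() {
        let slot = Duration::from_secs(12);
        // Early duplicate: a candidate for re-queueing until after the deadline.
        assert!(!block_is_late(Duration::from_secs(2), slot));
        // Already late: process immediately, no proposer boost at stake.
        assert!(block_is_late(Duration::from_secs(5), slot));
    }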
+ Err(ObserveError::FinalizedBlock { .. }) + | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + } + }; + + // If we've already seen a block from this proposer *and* the block + // arrived before the attestation deadline, requeue it to ensure it is + // imported late enough that it won't receive a proposer boost. + if !block_is_late && proposal_already_known() { + debug!( + self.log, + "Delaying processing of duplicate RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot() + ); + + // Send a message to the reprocessing queue to retry the block. + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block_root, + block: block.clone(), + process_type, + seen_timestamp, + should_process: true, + }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to inform block import"; + "source" => "rpc", + "block_root" => %block_root + ); + } + return; + } + let slot = block.slot(); let parent_root = block.message().parent_root(); let result = self @@ -513,6 +576,21 @@ impl Worker { }) } } + ref err @ BlockError::ParentExecutionPayloadInvalid { ref parent_root } => { + warn!( + self.log, + "Failed to sync chain built on invalid parent"; + "parent_root" => ?parent_root, + "advice" => "check execution node for corruption then restart it and Lighthouse", + ); + Err(ChainSegmentFailed { + message: format!("Peer sent invalid block. Reason: {err:?}"), + // We need to penalise harshly in case this represents an actual attack. In case + // of a faulty EL it will usually require manual intervention to fix anyway, so + // it's not too bad if we drop most of our peers. + peer_action: Some(PeerAction::LowToleranceError), + }) + } other => { debug!( self.log, "Invalid block received"; diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index a2fbe576109..9bf123e8dec 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -20,13 +20,13 @@ pub struct UPnPConfig { disable_discovery: bool, } -impl From<&NetworkConfig> for UPnPConfig { - fn from(config: &NetworkConfig) -> Self { - UPnPConfig { - tcp_port: config.libp2p_port, - udp_port: config.discovery_port, +impl UPnPConfig { + pub fn from_config(config: &NetworkConfig) -> Option { + config.listen_addrs().v4().map(|v4_addr| UPnPConfig { + tcp_port: v4_addr.tcp_port, + udp_port: v4_addr.udp_port, disable_discovery: config.disable_discovery, - } + }) } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs new file mode 100644 index 00000000000..7f75a27fe25 --- /dev/null +++ b/beacon_node/network/src/router.rs @@ -0,0 +1,535 @@ +//! This module handles incoming network messages. +//! +//! It routes the messages to appropriate services. +//! It handles requests at the application layer in its associated processor and directs +//! syncing-related responses to the Sync manager.
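The new `router.rs` below folds the old `Router`/`Processor` split (both deleted later in this diff) into a single type driven by one unbounded channel. A minimal sketch of that wiring, with a stand-in enum in place of the real `RouterMessage<E>`:

    use tokio::sync::mpsc;

    // Stand-in for RouterMessage<E>; the real enum is declared below.
    #[derive(Debug)]
    enum RouterMessage {
        StatusPeer(u64), // u64 stands in for PeerId
        PeerDisconnected(u64),
    }

    #[tokio::main]
    async fn main() {
        // `Router::spawn` hands the send half back to the caller; a spawned
        // task owns the receive half and routes each message, as
        // `handle_message` does below.
        let (handler_send, mut handler_recv) = mpsc::unbounded_channel();
        let router = tokio::spawn(async move {
            while let Some(msg) = handler_recv.recv().await {
                println!("routing {msg:?}");
            }
        });
        handler_send.send(RouterMessage::StatusPeer(7)).unwrap();
        handler_send.send(RouterMessage::PeerDisconnected(7)).unwrap();
        drop(handler_send); // closing the channel lets the router task finish
        router.await.unwrap();
    }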
+#![allow(clippy::unit_arg)] + +use crate::beacon_processor::{ + BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, +}; +use crate::error; +use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; +use crate::sync::manager::RequestId as SyncId; +use crate::sync::SyncMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use futures::prelude::*; +use lighthouse_network::rpc::*; +use lighthouse_network::{ + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, +}; +use slog::{debug, o, trace}; +use slog::{error, warn}; +use std::cmp; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::UnboundedReceiverStream; +use types::{EthSpec, SignedBeaconBlock}; + +/// Handles messages from the network and routes them to the appropriate service to be handled. +pub struct Router { + /// Access to the peer db and network information. + network_globals: Arc>, + /// A reference to the underlying beacon chain. + chain: Arc>, + /// A channel to the syncing thread. + sync_send: mpsc::UnboundedSender>, + /// A network context to return and handle RPC requests. + network: HandlerNetworkContext, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, + /// The `Router` logger. + log: slog::Logger, +} + +/// Types of messages the router can receive. +#[derive(Debug)] +pub enum RouterMessage { + /// Peer has disconnected. + PeerDisconnected(PeerId), + /// An RPC request has been received. + RPCRequestReceived { + peer_id: PeerId, + id: PeerRequestId, + request: Request, + }, + /// An RPC response has been received. + RPCResponseReceived { + peer_id: PeerId, + request_id: RequestId, + response: Response, + }, + /// An RPC request failed + RPCFailed { + peer_id: PeerId, + request_id: RequestId, + }, + /// A gossip message has been received. The fields are: message id, the peer that sent us this + /// message, the message itself and a bool which indicates if the message should be processed + /// by the beacon chain after successful verification. + PubsubMessage(MessageId, PeerId, PubsubMessage, bool), + /// The peer manager has requested we re-status a peer. + StatusPeer(PeerId), +} + +impl Router { + /// Initializes and runs the Router. 
+ pub fn spawn( + beacon_chain: Arc>, + network_globals: Arc>, + network_send: mpsc::UnboundedSender>, + executor: task_executor::TaskExecutor, + log: slog::Logger, + ) -> error::Result>> { + let message_handler_log = log.new(o!("service"=> "router")); + trace!(message_handler_log, "Service starting"); + + let (handler_send, handler_recv) = mpsc::unbounded_channel(); + + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + + let sync_logger = log.new(o!("service"=> "sync")); + + // spawn the sync thread + let sync_send = crate::sync::manager::spawn( + executor.clone(), + beacon_chain.clone(), + network_globals.clone(), + network_send.clone(), + beacon_processor_send.clone(), + sync_logger, + ); + + BeaconProcessor { + beacon_chain: Arc::downgrade(&beacon_chain), + network_tx: network_send.clone(), + sync_tx: sync_send.clone(), + network_globals: network_globals.clone(), + executor: executor.clone(), + max_workers: cmp::max(1, num_cpus::get()), + current_workers: 0, + importing_blocks: Default::default(), + log: log.clone(), + } + .spawn_manager(beacon_processor_receive, None); + + // generate the Message handler + let mut handler = Router { + network_globals, + chain: beacon_chain, + sync_send, + network: HandlerNetworkContext::new(network_send, log.clone()), + beacon_processor_send, + log: message_handler_log, + }; + + // spawn handler task and move the message handler instance into the spawned thread + executor.spawn( + async move { + debug!(log, "Network message router started"); + UnboundedReceiverStream::new(handler_recv) + .for_each(move |msg| future::ready(handler.handle_message(msg))) + .await; + }, + "router", + ); + + Ok(handler_send) + } + + /// Handle all messages incoming from the network service. + fn handle_message(&mut self, message: RouterMessage) { + match message { + // we have initiated a connection to a peer or the peer manager has requested a + // re-status + RouterMessage::StatusPeer(peer_id) => { + self.send_status(peer_id); + } + // A peer has disconnected + RouterMessage::PeerDisconnected(peer_id) => { + self.send_to_sync(SyncMessage::Disconnect(peer_id)); + } + RouterMessage::RPCRequestReceived { + peer_id, + id, + request, + } => { + self.handle_rpc_request(peer_id, id, request); + } + RouterMessage::RPCResponseReceived { + peer_id, + request_id, + response, + } => { + self.handle_rpc_response(peer_id, request_id, response); + } + RouterMessage::RPCFailed { + peer_id, + request_id, + } => { + self.on_rpc_error(peer_id, request_id); + } + RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { + self.handle_gossip(id, peer_id, gossip, should_process); + } + } + } + + /* RPC - Related functionality */ + + /// A new RPC request has been received from the network. 
+ fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: PeerRequestId, request: Request) { + if !self.network_globals.peers.read().is_connected(&peer_id) { + debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); + return; + } + match request { + Request::Status(status_message) => { + self.on_status_request(peer_id, request_id, status_message) + } + Request::BlocksByRange(request) => self.send_beacon_processor_work( + BeaconWorkEvent::blocks_by_range_request(peer_id, request_id, request), + ), + Request::BlocksByRoot(request) => self.send_beacon_processor_work( + BeaconWorkEvent::blocks_by_roots_request(peer_id, request_id, request), + ), + Request::LightClientBootstrap(request) => self.send_beacon_processor_work( + BeaconWorkEvent::lightclient_bootstrap_request(peer_id, request_id, request), + ), + } + } + + /// An RPC response has been received from the network. + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + response: Response, + ) { + match response { + Response::Status(status_message) => { + debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message); + self.send_beacon_processor_work(BeaconWorkEvent::status_message( + peer_id, + status_message, + )) + } + Response::BlocksByRange(beacon_block) => { + self.on_blocks_by_range_response(peer_id, request_id, beacon_block); + } + Response::BlocksByRoot(beacon_block) => { + self.on_blocks_by_root_response(peer_id, request_id, beacon_block); + } + Response::LightClientBootstrap(_) => unreachable!(), + } + } + + /// Handle gossip messages. + /// Note: `should_process` is currently only useful for the `Attestation` variant. + /// If `should_process` is `false`, we only propagate the message on successful verification; + /// otherwise, we propagate **and** import into the beacon chain.
+ fn handle_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + gossip_message: PubsubMessage, + should_process: bool, + ) { + match gossip_message { + PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self + .send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( + message_id, + peer_id, + *aggregate_and_proof, + timestamp_now(), + )), + PubsubMessage::Attestation(subnet_attestation) => { + self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( + message_id, + peer_id, + subnet_attestation.1, + subnet_attestation.0, + should_process, + timestamp_now(), + )) + } + PubsubMessage::BeaconBlock(block) => { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( + message_id, + peer_id, + self.network_globals.client(&peer_id), + block, + timestamp_now(), + )) + } + PubsubMessage::VoluntaryExit(exit) => { + debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( + message_id, peer_id, exit, + )) + } + PubsubMessage::ProposerSlashing(proposer_slashing) => { + debug!( + self.log, + "Received a proposer slashing"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( + message_id, + peer_id, + proposer_slashing, + )) + } + PubsubMessage::AttesterSlashing(attester_slashing) => { + debug!( + self.log, + "Received an attester slashing"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( + message_id, + peer_id, + attester_slashing, + )) + } + PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { + trace!( + self.log, + "Received sync committee aggregate"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( + message_id, + peer_id, + *contribution_and_proof, + timestamp_now(), + )) + } + PubsubMessage::SyncCommitteeMessage(sync_committee_msg) => { + trace!( + self.log, + "Received sync committee signature"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( + message_id, + peer_id, + sync_committee_msg.1, + sync_committee_msg.0, + timestamp_now(), + )) + } + PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { + trace!( + self.log, + "Received light client finality update"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work( + BeaconWorkEvent::gossip_light_client_finality_update( + message_id, + peer_id, + light_client_finality_update, + timestamp_now(), + ), + ) + } + PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { + trace!( + self.log, + "Received light client optimistic update"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work( + BeaconWorkEvent::gossip_light_client_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + timestamp_now(), + ), + ) + } + PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self + .send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( + message_id, + peer_id, + bls_to_execution_change, + )), + } + } + + fn send_status(&mut self, peer_id: PeerId) { + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); + } + + fn send_to_sync(&mut self, message: SyncMessage) { +
self.sync_send.send(message).unwrap_or_else(|e| { + warn!( + self.log, + "Could not send message to the sync service"; + "error" => %e, + ) + }); + } + + /// An error occurred during an RPC request. The state is maintained by the sync manager, so + /// this function notifies the sync manager of the error. + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { + // Check if the failed RPC belongs to sync + if let RequestId::Sync(request_id) = request_id { + self.send_to_sync(SyncMessage::RpcError { + peer_id, + request_id, + }); + } + } + + /// Handle a `Status` request. + /// + /// Processes the `Status` from the remote peer and sends back our `Status`. + pub fn on_status_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + status: StatusMessage, + ) { + debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); + + // Say status back. + self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); + + self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) + } + + /// Handle a `BlocksByRange` response from the peer. + /// A `beacon_block` behaves as a stream which is terminated on a `None` response. + pub fn on_blocks_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_block: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { + unreachable!("Block lookups do not request BBRange requests") + } + id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, + }, + RequestId::Router => unreachable!("All BBRange requests belong to sync"), + }; + + trace!( + self.log, + "Received BlocksByRange Response"; + "peer" => %peer_id, + ); + + self.send_to_sync(SyncMessage::RpcBlock { + peer_id, + request_id, + beacon_block, + seen_timestamp: timestamp_now(), + }); + } + + /// Handle a `BlocksByRoot` response from the peer. + pub fn on_blocks_by_root_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_block: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, + SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => { + unreachable!("Batch syncing do not request BBRoot requests") + } + }, + RequestId::Router => unreachable!("All BBRoot requests belong to sync"), + }; + + trace!( + self.log, + "Received BlocksByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcBlock { + peer_id, + request_id, + beacon_block, + seen_timestamp: timestamp_now(), + }); + } + + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { + self.beacon_processor_send + .try_send(work) + .unwrap_or_else(|e| { + let work_type = match &e { + mpsc::error::TrySendError::Closed(work) + | mpsc::error::TrySendError::Full(work) => work.work_type(), + }; + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + }) + } +} + +/// Wraps a Network Channel to employ various RPC related network functionality for the +/// processor. +#[derive(Clone)] +pub struct HandlerNetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender>, + /// Logger for the `NetworkContext`. 
+ log: slog::Logger, +} + +impl HandlerNetworkContext { + pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { + Self { network_send, log } + } + + /// Sends a message to the network task. + fn inform_network(&mut self, msg: NetworkMessage) { + self.network_send.send(msg).unwrap_or_else( + |e| warn!(self.log, "Could not send message to the network service"; "error" => %e), + ) + } + + /// Sends a request to the network task. + pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { + self.inform_network(NetworkMessage::SendRequest { + peer_id, + request_id: RequestId::Router, + request, + }) + } + + /// Sends a response to the network task. + pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { + self.inform_network(NetworkMessage::SendResponse { + peer_id, + id, + response, + }) + } +} + +fn timestamp_now() -> Duration { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) +} diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs deleted file mode 100644 index 231f30f3eef..00000000000 --- a/beacon_node/network/src/router/mod.rs +++ /dev/null @@ -1,321 +0,0 @@ -//! This module handles incoming network messages. -//! -//! It routes the messages to appropriate services. -//! It handles requests at the application layer in its associated processor and directs -//! syncing-related responses to the Sync manager. -#![allow(clippy::unit_arg)] - -mod processor; - -use crate::error; -use crate::service::{NetworkMessage, RequestId}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use futures::prelude::*; -use lighthouse_network::{ - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, -}; -use processor::Processor; -use slog::{debug, o, trace}; -use std::sync::Arc; -use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; -use types::EthSpec; - -/// Handles messages received from the network and client and organises syncing. This -/// functionality of this struct is to validate an decode messages from the network before -/// passing them to the internal message processor. The message processor spawns a syncing thread -/// which manages which blocks need to be requested and processed. -pub struct Router { - /// Access to the peer db. - network_globals: Arc>, - /// Processes validated and decoded messages from the network. Has direct access to the - /// sync manager. - processor: Processor, - /// The `Router` logger. - log: slog::Logger, -} - -/// Types of messages the handler can receive. -#[derive(Debug)] -pub enum RouterMessage { - /// We have initiated a connection to a new peer. - PeerDialed(PeerId), - /// Peer has disconnected, - PeerDisconnected(PeerId), - /// An RPC request has been received. - RPCRequestReceived { - peer_id: PeerId, - id: PeerRequestId, - request: Request, - }, - /// An RPC response has been received. - RPCResponseReceived { - peer_id: PeerId, - request_id: RequestId, - response: Response, - }, - /// An RPC request failed - RPCFailed { - peer_id: PeerId, - request_id: RequestId, - }, - /// A gossip message has been received. The fields are: message id, the peer that sent us this - /// message, the message itself and a bool which indicates if the message should be processed - /// by the beacon chain after successful verification. - PubsubMessage(MessageId, PeerId, PubsubMessage, bool), - /// The peer manager has requested we re-status a peer. 
- StatusPeer(PeerId), -} - -impl Router { - /// Initializes and runs the Router. - pub fn spawn( - beacon_chain: Arc>, - network_globals: Arc>, - network_send: mpsc::UnboundedSender>, - executor: task_executor::TaskExecutor, - log: slog::Logger, - ) -> error::Result>> { - let message_handler_log = log.new(o!("service"=> "router")); - trace!(message_handler_log, "Service starting"); - - let (handler_send, handler_recv) = mpsc::unbounded_channel(); - - // Initialise a message instance, which itself spawns the syncing thread. - let processor = Processor::new( - executor.clone(), - beacon_chain, - network_globals.clone(), - network_send, - &log, - ); - - // generate the Message handler - let mut handler = Router { - network_globals, - processor, - log: message_handler_log, - }; - - // spawn handler task and move the message handler instance into the spawned thread - executor.spawn( - async move { - debug!(log, "Network message router started"); - UnboundedReceiverStream::new(handler_recv) - .for_each(move |msg| future::ready(handler.handle_message(msg))) - .await; - }, - "router", - ); - - Ok(handler_send) - } - - /// Handle all messages incoming from the network service. - fn handle_message(&mut self, message: RouterMessage) { - match message { - // we have initiated a connection to a peer or the peer manager has requested a - // re-status - RouterMessage::PeerDialed(peer_id) | RouterMessage::StatusPeer(peer_id) => { - self.processor.send_status(peer_id); - } - // A peer has disconnected - RouterMessage::PeerDisconnected(peer_id) => { - self.processor.on_disconnect(peer_id); - } - RouterMessage::RPCRequestReceived { - peer_id, - id, - request, - } => { - self.handle_rpc_request(peer_id, id, request); - } - RouterMessage::RPCResponseReceived { - peer_id, - request_id, - response, - } => { - self.handle_rpc_response(peer_id, request_id, response); - } - RouterMessage::RPCFailed { - peer_id, - request_id, - } => { - self.processor.on_rpc_error(peer_id, request_id); - } - RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { - self.handle_gossip(id, peer_id, gossip, should_process); - } - } - } - - /* RPC - Related functionality */ - - /// A new RPC request has been received from the network. - fn handle_rpc_request(&mut self, peer_id: PeerId, id: PeerRequestId, request: Request) { - if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); - return; - } - match request { - Request::Status(status_message) => { - self.processor - .on_status_request(peer_id, id, status_message) - } - Request::BlocksByRange(request) => self - .processor - .on_blocks_by_range_request(peer_id, id, request), - Request::BlocksByRoot(request) => self - .processor - .on_blocks_by_root_request(peer_id, id, request), - Request::LightClientBootstrap(request) => self - .processor - .on_lightclient_bootstrap(peer_id, id, request), - } - } - - /// An RPC response has been received from the network. - // we match on id and ignore responses past the timeout. - fn handle_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - response: Response, - ) { - // an error could have occurred. 
- match response { - Response::Status(status_message) => { - self.processor.on_status_response(peer_id, status_message); - } - Response::BlocksByRange(beacon_block) => { - self.processor - .on_blocks_by_range_response(peer_id, request_id, beacon_block); - } - Response::BlocksByRoot(beacon_block) => { - self.processor - .on_blocks_by_root_response(peer_id, request_id, beacon_block); - } - Response::LightClientBootstrap(_) => unreachable!(), - } - } - - /// Handle RPC messages. - /// Note: `should_process` is currently only useful for the `Attestation` variant. - /// if `should_process` is `false`, we only propagate the message on successful verification, - /// else, we propagate **and** import into the beacon chain. - fn handle_gossip( - &mut self, - id: MessageId, - peer_id: PeerId, - gossip_message: PubsubMessage, - should_process: bool, - ) { - match gossip_message { - // Attestations should never reach the router. - PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { - self.processor - .on_aggregated_attestation_gossip(id, peer_id, *aggregate_and_proof); - } - PubsubMessage::Attestation(subnet_attestation) => { - self.processor.on_unaggregated_attestation_gossip( - id, - peer_id, - subnet_attestation.1.clone(), - subnet_attestation.0, - should_process, - ); - } - PubsubMessage::BeaconBlock(block) => { - self.processor.on_block_gossip( - id, - peer_id, - self.network_globals.client(&peer_id), - block, - ); - } - PubsubMessage::VoluntaryExit(exit) => { - debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); - self.processor.on_voluntary_exit_gossip(id, peer_id, exit); - } - PubsubMessage::ProposerSlashing(proposer_slashing) => { - debug!( - self.log, - "Received a proposer slashing"; - "peer_id" => %peer_id - ); - self.processor - .on_proposer_slashing_gossip(id, peer_id, proposer_slashing); - } - PubsubMessage::AttesterSlashing(attester_slashing) => { - debug!( - self.log, - "Received a attester slashing"; - "peer_id" => %peer_id - ); - self.processor - .on_attester_slashing_gossip(id, peer_id, attester_slashing); - } - PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { - trace!( - self.log, - "Received sync committee aggregate"; - "peer_id" => %peer_id - ); - self.processor.on_sync_committee_contribution_gossip( - id, - peer_id, - *contribution_and_proof, - ); - } - PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { - trace!( - self.log, - "Received sync committee signature"; - "peer_id" => %peer_id - ); - self.processor.on_sync_committee_signature_gossip( - id, - peer_id, - sync_committtee_msg.1, - sync_committtee_msg.0, - ); - } - PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => { - trace!( - self.log, - "Received BLS to execution change"; - "peer_id" => %peer_id - ); - self.processor.on_bls_to_execution_change_gossip( - id, - peer_id, - bls_to_execution_change, - ); - } - PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { - trace!( - self.log, - "Received light client finality update"; - "peer_id" => %peer_id - ); - self.processor.on_light_client_finality_update_gossip( - id, - peer_id, - light_client_finality_update, - ); - } - PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { - trace!( - self.log, - "Received light client optimistic update"; - "peer_id" => %peer_id - ); - self.processor.on_light_client_optimistic_update_gossip( - id, - peer_id, - light_client_optimistic_update, - ); - } - } - } -} diff --git 
a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs deleted file mode 100644 index d3ba024e4ce..00000000000 --- a/beacon_node/network/src/router/processor.rs +++ /dev/null @@ -1,472 +0,0 @@ -use crate::beacon_processor::{ - BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, -}; -use crate::service::{NetworkMessage, RequestId}; -use crate::status::status_message; -use crate::sync::manager::RequestId as SyncId; -use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use lighthouse_network::rpc::*; -use lighthouse_network::{ - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, -}; -use slog::{debug, error, o, trace, warn}; -use std::cmp; -use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use store::SyncCommitteeMessage; -use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, -}; - -/// Processes validated messages from the network. It relays necessary data to the syncing thread -/// and processes blocks from the pubsub network. -pub struct Processor { - /// A reference to the underlying beacon chain. - chain: Arc>, - /// A channel to the syncing thread. - sync_send: mpsc::UnboundedSender>, - /// A network context to return and handle RPC requests. - network: HandlerNetworkContext, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, - /// The `RPCHandler` logger. - log: slog::Logger, -} - -impl Processor { - /// Instantiate a `Processor` instance - pub fn new( - executor: task_executor::TaskExecutor, - beacon_chain: Arc>, - network_globals: Arc>, - network_send: mpsc::UnboundedSender>, - log: &slog::Logger, - ) -> Self { - let sync_logger = log.new(o!("service"=> "sync")); - let (beacon_processor_send, beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - - // spawn the sync thread - let sync_send = crate::sync::manager::spawn( - executor.clone(), - beacon_chain.clone(), - network_globals.clone(), - network_send.clone(), - beacon_processor_send.clone(), - sync_logger, - ); - - BeaconProcessor { - beacon_chain: Arc::downgrade(&beacon_chain), - network_tx: network_send.clone(), - sync_tx: sync_send.clone(), - network_globals, - executor, - max_workers: cmp::max(1, num_cpus::get()), - current_workers: 0, - importing_blocks: Default::default(), - log: log.clone(), - } - .spawn_manager(beacon_processor_receive, None); - - Processor { - chain: beacon_chain, - sync_send, - network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send, - log: log.new(o!("service" => "router")), - } - } - - fn send_to_sync(&mut self, message: SyncMessage) { - self.sync_send.send(message).unwrap_or_else(|e| { - warn!( - self.log, - "Could not send message to the sync service"; - "error" => %e, - ) - }); - } - - /// Handle a peer disconnect. - /// - /// Removes the peer from the manager. - pub fn on_disconnect(&mut self, peer_id: PeerId) { - self.send_to_sync(SyncMessage::Disconnect(peer_id)); - } - - /// An error occurred during an RPC request. The state is maintained by the sync manager, so - /// this function notifies the sync manager of the error. 
- pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { - // Check if the failed RPC belongs to sync - if let RequestId::Sync(request_id) = request_id { - self.send_to_sync(SyncMessage::RpcError { - peer_id, - request_id, - }); - } - } - - /// Sends a `Status` message to the peer. - /// - /// Called when we first connect to a peer, or when the PeerManager determines we need to - /// re-status. - pub fn send_status(&mut self, peer_id: PeerId) { - let status_message = status_message(&self.chain); - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } - - /// Handle a `Status` request. - /// - /// Processes the `Status` from the remote peer and sends back our `Status`. - pub fn on_status_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - status: StatusMessage, - ) { - debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - - // Say status back. - self.network.send_response( - peer_id, - Response::Status(status_message(&self.chain)), - request_id, - ); - - self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) - } - - /// Process a `Status` response from a peer. - pub fn on_status_response(&mut self, peer_id: PeerId, status: StatusMessage) { - debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status); - self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) - } - - /// Handle a `BlocksByRoot` request from the peer. - pub fn on_blocks_by_root_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_roots_request( - peer_id, request_id, request, - )) - } - - /// Handle a `LightClientBootstrap` request from the peer. - pub fn on_lightclient_bootstrap( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request( - peer_id, request_id, request, - )) - } - - /// Handle a `BlocksByRange` request from the peer. - pub fn on_blocks_by_range_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - req: BlocksByRangeRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_range_request( - peer_id, request_id, req, - )) - } - - /// Handle a `BlocksByRange` response from the peer. - /// A `beacon_block` behaves as a stream which is terminated on a `None` response. - pub fn on_blocks_by_range_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - beacon_block: Option>>, - ) { - let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { - unreachable!("Block lookups do not request BBRange requests") - } - id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, - }, - RequestId::Router => unreachable!("All BBRange requests belong to sync"), - }; - - trace!( - self.log, - "Received BlocksByRange Response"; - "peer" => %peer_id, - ); - - self.send_to_sync(SyncMessage::RpcBlock { - peer_id, - request_id, - beacon_block, - seen_timestamp: timestamp_now(), - }); - } - - /// Handle a `BlocksByRoot` response from the peer. 
- pub fn on_blocks_by_root_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - beacon_block: Option>>, - ) { - let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => { - unreachable!("Batch syncing do not request BBRoot requests") - } - }, - RequestId::Router => unreachable!("All BBRoot requests belong to sync"), - }; - - trace!( - self.log, - "Received BlocksByRoot Response"; - "peer" => %peer_id, - ); - self.send_to_sync(SyncMessage::RpcBlock { - peer_id, - request_id, - beacon_block, - seen_timestamp: timestamp_now(), - }); - } - - /// Process a gossip message declaring a new block. - /// - /// Attempts to apply to block to the beacon chain. May queue the block for later processing. - /// - /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( - message_id, - peer_id, - peer_client, - block, - timestamp_now(), - )) - } - - pub fn on_unaggregated_attestation_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - unaggregated_attestation: Attestation, - subnet_id: SubnetId, - should_process: bool, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( - message_id, - peer_id, - unaggregated_attestation, - subnet_id, - should_process, - timestamp_now(), - )) - } - - pub fn on_aggregated_attestation_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - aggregate: SignedAggregateAndProof, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( - message_id, - peer_id, - aggregate, - timestamp_now(), - )) - } - - pub fn on_voluntary_exit_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( - message_id, - peer_id, - voluntary_exit, - )) - } - - pub fn on_proposer_slashing_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( - message_id, - peer_id, - proposer_slashing, - )) - } - - pub fn on_attester_slashing_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( - message_id, - peer_id, - attester_slashing, - )) - } - - pub fn on_sync_committee_signature_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - sync_signature: SyncCommitteeMessage, - subnet_id: SyncSubnetId, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( - message_id, - peer_id, - sync_signature, - subnet_id, - timestamp_now(), - )) - } - - pub fn on_sync_committee_contribution_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - sync_contribution: SignedContributionAndProof, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( - message_id, - peer_id, - sync_contribution, - timestamp_now(), - )) - } - - pub fn on_bls_to_execution_change_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - bls_to_execution_change: Box, - ) { - 
self.send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( - message_id, - peer_id, - bls_to_execution_change, - )) - } - - pub fn on_light_client_finality_update_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_finality_update( - message_id, - peer_id, - light_client_finality_update, - timestamp_now(), - )) - } - - pub fn on_light_client_optimistic_update_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_optimistic_update( - message_id, - peer_id, - light_client_optimistic_update, - timestamp_now(), - )) - } - - fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { - self.beacon_processor_send - .try_send(work) - .unwrap_or_else(|e| { - let work_type = match &e { - mpsc::error::TrySendError::Closed(work) - | mpsc::error::TrySendError::Full(work) => work.work_type(), - }; - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) - }) - } -} - -/// Wraps a Network Channel to employ various RPC related network functionality for the -/// processor. -#[derive(Clone)] -pub struct HandlerNetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender>, - /// Logger for the `NetworkContext`. - log: slog::Logger, -} - -impl HandlerNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { - Self { network_send, log } - } - - /// Sends a message to the network task. - fn inform_network(&mut self, msg: NetworkMessage) { - self.network_send.send(msg).unwrap_or_else( - |e| warn!(self.log, "Could not send message to the network service"; "error" => %e), - ) - } - - /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { - self.inform_network(NetworkMessage::SendRequest { - peer_id, - request_id: RequestId::Router, - request, - }) - } - - /// Sends a response to the network task. - pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { - self.inform_network(NetworkMessage::SendResponse { - peer_id, - id, - response, - }) - } -} - -fn timestamp_now() -> Duration { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) -} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 410461bcd35..3e86d2099f0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -228,16 +228,21 @@ impl NetworkService { let (network_senders, network_recievers) = NetworkSenders::new(); // try and construct UPnP port mappings if required. 
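The call site below changes to match the new `UPnPConfig::from_config` from the `nat.rs` hunk earlier in this diff: construction is now fallible, and mappings are only attempted when an IPv4 listening address is configured. A small sketch of the same Option-gating pattern with stand-in types:

    // Stand-ins for the NetworkConfig / UPnPConfig types in the diff above.
    struct ListenAddr { tcp_port: u16, udp_port: u16 }
    struct Config { v4: Option<ListenAddr>, disable_discovery: bool }
    struct UPnPConfig { tcp_port: u16, udp_port: u16, disable_discovery: bool }

    impl UPnPConfig {
        // None when there is no IPv4 listening address, so the caller simply
        // skips spawning the UPnP task (as NetworkService does below).
        fn from_config(config: &Config) -> Option<Self> {
            config.v4.as_ref().map(|v4| UPnPConfig {
                tcp_port: v4.tcp_port,
                udp_port: v4.udp_port,
                disable_discovery: config.disable_discovery,
            })
        }
    }

    fn main() {
        let v6_only = Config { v4: None, disable_discovery: false };
        assert!(UPnPConfig::from_config(&v6_only).is_none()); // IPv6-only: no UPnP
    }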
- let upnp_config = crate::nat::UPnPConfig::from(config); - let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_senders.network_send(); - if config.upnp_enabled { - executor.spawn_blocking( - move || { - crate::nat::construct_upnp_mappings(upnp_config, upnp_network_send, upnp_log) - }, - "UPnP", - ); + if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { + let upnp_log = network_log.new(o!("service" => "UPnP")); + let upnp_network_send = network_senders.network_send(); + if config.upnp_enabled { + executor.spawn_blocking( + move || { + crate::nat::construct_upnp_mappings( + upnp_config, + upnp_network_send, + upnp_log, + ) + }, + "UPnP", + ); + } } // get a reference to the beacon chain store @@ -467,7 +472,7 @@ impl NetworkService { ) { match ev { NetworkEvent::PeerConnectedOutgoing(peer_id) => { - self.send_to_router(RouterMessage::PeerDialed(peer_id)); + self.send_to_router(RouterMessage::StatusPeer(peer_id)); } NetworkEvent::PeerConnectedIncoming(_) | NetworkEvent::PeerBanned(_) diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index f0dd0e75ffd..83fcc8c9ac8 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -59,10 +59,9 @@ mod tests { ); let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.libp2p_port = 21212; config.upnp_enabled = false; - config.discovery_port = 21212; config.boot_nodes_enr = enrs.clone(); runtime.block_on(async move { // Create a new network service which implicitly gets dropped at the diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 70ba1c8170b..e46a52cfb21 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -112,6 +112,9 @@ pub struct AttestationService { #[cfg(feature = "deterministic_long_lived_attnets")] next_long_lived_subscription_event: Pin>, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. log: slog::Logger, } @@ -155,6 +158,7 @@ impl AttestationService { known_validators: HashSetDelay::new(last_seen_val_timeout), waker: None, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, subscribe_all_subnets: config.subscribe_all_subnets, long_lived_subnet_subscription_slots, log, @@ -256,6 +260,11 @@ impl AttestationService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // If the node is in a proposer-only state, we ignore all subnet subscriptions. 
+ if self.proposer_only { + return Ok(()); + } + // Maps each subnet_id subscription to its highest slot let mut subnets_to_discover: HashMap = HashMap::new(); for subscription in subscriptions { @@ -450,6 +459,10 @@ impl AttestationService { subnet: SubnetId, attestation: &Attestation, ) -> bool { + // Proposer-only mode does not need to process attestations. + if self.proposer_only { + return false; + } self.aggregate_validators_on_subnet .as_ref() .map(|tracked_vals| { diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 0b27ff527fd..eda7ce8efbd 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -54,6 +54,9 @@ pub struct SyncCommitteeService { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. log: slog::Logger, } @@ -82,6 +85,7 @@ impl SyncCommitteeService { waker: None, subscribe_all_subnets: config.subscribe_all_subnets, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, log, } } @@ -110,6 +114,11 @@ impl SyncCommitteeService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // A proposer-only node does not subscribe to any sync committees. + if self.proposer_only { + return Ok(()); + } + let mut subnets_to_discover = Vec::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cc4eacde898..fdbecb656f4 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -12,8 +12,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c5be4f0a614..24c0623f5c3 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -497,7 +497,8 @@ impl OperationPool { |exit| { filter(exit.as_inner()) && exit.signature_is_still_valid(&state.fork()) - && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + && verify_exit(state, None, exit.as_inner(), VerifySignatures::False, spec) + .is_ok() }, |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 792d6253482..a578ac7ea20 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -71,7 +71,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") - .help("The address lighthouse will listen for UDP and TCP connections.") + .help("The address lighthouse will listen on for UDP and TCP connections. To listen \ over IPv4 and IPv6, set this flag twice with the different values.\n\ Examples:\n\ - --listen-address '0.0.0.0' will listen over IPv4.\n\ - --listen-address '::' will listen over IPv6.\n\ - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6.
The order of the given addresses is not relevant. However, \ multiple IPv4 or multiple IPv6 addresses will not be accepted.") .multiple(true) .max_values(2) .default_value("0.0.0.0") .takes_value(true) ) .arg( Arg::with_name("port") .long("port") .value_name("PORT") - .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .help("The TCP/UDP port to listen on. The UDP port can be modified by the \ + --discovery-port flag. If listening over both IPv4 and IPv6, the --port flag \ + will apply to the IPv4 address and --port6 to the IPv6 address.") .default_value("9000") .takes_value(true), ) + .arg( + Arg::with_name("port6") + .long("port6") + .value_name("PORT") + .help("The TCP/UDP port to listen on over IPv6 when listening over both IPv4 and \ + IPv6. Defaults to 9090 when required.") + .default_value("9090") + .takes_value(true), + ) .arg( Arg::with_name("discovery-port") .long("discovery-port") @@ -90,11 +110,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("The UDP port that discovery will listen on. Defaults to `port`") .takes_value(true), ) + .arg( + Arg::with_name("discovery-port6") + .long("discovery-port6") + .value_name("PORT") + .help("The UDP port that discovery will listen on over IPv6 if listening over \ + both IPv4 and IPv6. Defaults to `port6`") + .hidden(true) // TODO: implement dual stack via two sockets in discv5. + .takes_value(true), + ) .arg( Arg::with_name("target-peers") .long("target-peers") .help("The target number of peers.") - .default_value("80") .takes_value(true), ) .arg( @@ -130,27 +158,49 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("enr-udp-port") .long("enr-udp-port") .value_name("PORT") - .help("The UDP port of the local ENR. Set this only if you are sure other nodes can connect to your local node on this port.") + .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv4.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-udp6-port") + .long("enr-udp6-port") + .value_name("PORT") + .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv6.") .takes_value(true), ) .arg( Arg::with_name("enr-tcp-port") .long("enr-tcp-port") .value_name("PORT") - .help("The TCP port of the local ENR. Set this only if you are sure other nodes can connect to your local node on this port.\ - The --port flag is used if this is not set.") + .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv4. The --port flag is \ + used if this is not set.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-tcp6-port") + .long("enr-tcp6-port") + .value_name("PORT") + .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv6. The --port6 flag is \ + used if this is not set.") + .takes_value(true), + ) .arg( Arg::with_name("enr-address") .long("enr-address") .value_name("ADDRESS") - .help("The IP address/ DNS address to broadcast to other peers on how to reach this node. \ - If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ - does not auto-update based on PONG responses in discovery. \ - Set this only if you are sure other nodes can connect to your local node on this address.
\ - Discovery will automatically find your external address, if possible.") + .help("The IP address/DNS address to broadcast to other peers on how to reach \ + this node. If a DNS address is provided, the enr-address is set to the IP \ + address it resolves to and does not auto-update based on PONG responses in \ + discovery. Set this only if you are sure other nodes can connect to your \ + local node on this address. This will update the `ip4` or `ip6` ENR fields \ + accordingly. To update both, set this flag twice with the different values.") .requires("enr-udp-port") + .multiple(true) + .max_values(2) .takes_value(true), ) .arg( @@ -158,7 +208,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("e") .long("enr-match") .help("Sets the local ENR IP address and port to match those set for lighthouse. \ - Specifically, the IP address will be the value of --listen-address and the UDP port will be --discovery-port.") + Specifically, the IP address will be the value of --listen-address and the \ + UDP port will be --discovery-port.") ) .arg( Arg::with_name("disable-enr-auto-update") @@ -181,6 +232,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") .takes_value(false), ) + .arg( + Arg::with_name("disable-peer-scoring") + .long("disable-peer-scoring") + .help("Disables peer scoring in lighthouse. WARNING: This is a dev-only flag and is only meant to be used in local testing scenarios. \ + Using this flag on a real network may cause your node to become eclipsed and see a different view of the network.") + .takes_value(false) + .hidden(true), + ) .arg( Arg::with_name("trusted-peers") .long("trusted-peers") @@ -209,6 +268,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .min_values(0) .hidden(true) ) + .arg( + Arg::with_name("proposer-only") + .long("proposer-only") + .help("Sets this beacon node to be a block-proposer-only node. \ + This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \ + for a beacon node being referenced by a validator client using the --proposer-nodes flag. This configuration is for enabling more secure setups.") + .takes_value(false), + ) + + .arg( + Arg::with_name("disable-backfill-rate-limiting") + .long("disable-backfill-rate-limiting") + .help("Disable the backfill sync rate-limiting. This allows users to sync the entire chain as fast \ + as possible; however, it can result in resource contention which degrades staking performance. Stakers \ + should generally avoid this flag since backfill sync is not required for staking.") + .takes_value(false), + ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -318,6 +394,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5054).") .takes_value(true), ) + .arg( + Arg::with_name("shuffling-cache-size") + .long("shuffling-cache-size") + .help("Some HTTP API requests can be optimised by caching the shufflings at each epoch. \ + This flag allows the user to set the shuffling cache size in epochs. \ + Shufflings are dependent on validator count and setting this value to a large number can consume a large amount of memory.") + .takes_value(true) + ) /* * Monitoring metrics @@ -809,6 +893,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { allowed.
Default: 2") .conflicts_with("disable-proposer-reorgs") ) + .arg( + Arg::with_name("proposer-reorg-cutoff") + .long("proposer-reorg-cutoff") + .value_name("MILLISECONDS") + .help("Maximum delay after the start of the slot at which to propose a reorging \ + block. Lower values can prevent failed reorgs by ensuring the block has \ + ample time to propagate and be processed by the network. The default is \ + 1/12th of a slot (1 second on mainnet)") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-disallowed-offsets") + .long("proposer-reorg-disallowed-offsets") + .value_name("N1,N2,...") + .help("Comma-separated list of integer offsets which can be used to avoid \ + proposing reorging blocks at certain slots. An offset of N means that \ + reorging proposals will not be attempted at any slot such that \ + `slot % SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be \ + avoided. Any offsets supplied with this flag will impose additional \ + restrictions.") + .conflicts_with("disable-proposer-reorgs") + ) .arg( Arg::with_name("prepare-payload-lookahead") .long("prepare-payload-lookahead") @@ -902,12 +1008,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("0") .takes_value(true) ) + .arg( + Arg::with_name("builder-user-agent") + .long("builder-user-agent") + .value_name("STRING") + .help("The HTTP user agent to send alongside requests to the builder URL. The \ + default is Lighthouse's version string.") + .requires("builder") + .takes_value(true) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") .hidden(true) - .help("Enables an alternative, potentially more performant FFG \ - vote tracking method.") + .help("This flag is deprecated and has no effect.") .takes_value(true) .default_value("true") ) @@ -915,7 +1029,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("count-unrealized-full") .long("count-unrealized-full") .hidden(true) - .help("Stricter version of `count-unrealized`.") + .help("This flag is deprecated and has no effect.") .takes_value(true) .default_value("false") ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 55335081cb7..7cd2a627999 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use beacon_chain::chain_config::{ - ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use clap::ArgMatches; @@ -10,13 +10,13 @@ use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; use genesis::Eth1Endpoint; use http_api::TlsConfig; +use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; use std::fmt::Debug; -use std::fmt::Write; use std::fs; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; @@ -24,7 +24,6 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; -use unused_port::{unused_tcp_port, unused_udp_port}; /// Gets the fully-initialized global client. 
/// @@ -78,13 +77,7 @@ pub fn get_config( let data_dir_ref = client_config.data_dir().clone(); - set_network_config( - &mut client_config.network, - cli_args, - &data_dir_ref, - log, - false, - )?; + set_network_config(&mut client_config.network, cli_args, &data_dir_ref, log)?; /* * Staking flag @@ -155,6 +148,10 @@ pub fn get_config( client_config.http_api.allow_sync_stalled = true; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { + client_config.chain.shuffling_cache_size = cache_size; + } + /* * Prometheus metrics HTTP server */ @@ -332,6 +329,9 @@ pub fn get_config( let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = + clap_utils::parse_optional(cli_args, "builder-user-agent")?; } // Set config values from parse values. @@ -404,13 +404,6 @@ pub fn get_config( * Discovery address is set to localhost by default. */ if cli_args.is_present("zero-ports") { - if client_config.network.enr_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { - client_config.network.enr_address = None - } - client_config.network.libp2p_port = - unused_tcp_port().map_err(|e| format!("Failed to get port for libp2p: {}", e))?; - client_config.network.discovery_port = - unused_udp_port().map_err(|e| format!("Failed to get port for discovery: {}", e))?; client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -696,6 +689,23 @@ pub fn get_config( client_config.chain.re_org_max_epochs_since_finalization = clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + client_config.chain.re_org_cutoff_millis = + clap_utils::parse_optional(cli_args, "proposer-reorg-cutoff")?; + + if let Some(disallowed_offsets_str) = + clap_utils::parse_optional::(cli_args, "proposer-reorg-disallowed-offsets")? + { + let disallowed_offsets = disallowed_offsets_str + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("invalid disallowed-offsets: {e:?}")) + }) + .collect::, _>>()?; + client_config.chain.re_org_disallowed_offsets = + DisallowedReOrgOffsets::new::(disallowed_offsets) + .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))?; + } } // Note: This overrides any previous flags that enable this option. @@ -719,10 +729,21 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - client_config.chain.count_unrealized = - clap_utils::parse_required(cli_args, "count-unrealized")?; - client_config.chain.count_unrealized_full = - clap_utils::parse_required::(cli_args, "count-unrealized-full")?.into(); + if !clap_utils::parse_required::(cli_args, "count-unrealized")? { + warn!( + log, + "The flag --count-unrealized is deprecated and will be removed"; + "info" => "any use of the flag will have no effect" + ); + } + + if clap_utils::parse_required::(cli_args, "count-unrealized-full")? 
{ + warn!( + log, + "The flag --count-unrealized-full is deprecated and will be removed"; + "info" => "setting it to `true` has no effect" + ); + } client_config.chain.always_reset_payload_statuses = cli_args.is_present("reset-payload-statuses"); @@ -758,16 +779,184 @@ pub fn get_config( client_config.always_prefer_builder_payload = true; } + // Backfill sync rate-limiting + client_config.chain.enable_backfill_rate_limiting = + !cli_args.is_present("disable-backfill-rate-limiting"); + Ok(client_config) } -/// Sets the network config from the command line arguments +/// Gets the listening_addresses for lighthouse based on the cli options. +pub fn parse_listening_addresses( + cli_args: &ArgMatches, + log: &Logger, +) -> Result<ListenAddress, String> { + let listen_addresses_str = cli_args + .values_of("listen-address") + .expect("--listen-address has a default value"); + + let use_zero_ports = cli_args.is_present("zero-ports"); + + // parse the possible ips + let mut maybe_ipv4 = None; + let mut maybe_ipv6 = None; + for addr_str in listen_addresses_str { + let addr = addr_str.parse::<IpAddr>().map_err(|parse_error| { + format!("Failed to parse listen-address ({addr_str}) as an IP address: {parse_error}") + })?; + + match addr { + IpAddr::V4(v4_addr) => match &maybe_ipv4 { + Some(first_ipv4_addr) => { + return Err(format!( + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + Got two IPv4 addresses {first_ipv4_addr} and {v4_addr}" + )); + } + None => maybe_ipv4 = Some(v4_addr), + }, + IpAddr::V6(v6_addr) => match &maybe_ipv6 { + Some(first_ipv6_addr) => { + return Err(format!( + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + Got two IPv6 addresses {first_ipv6_addr} and {v6_addr}" + )); + } + None => maybe_ipv6 = Some(v6_addr), + }, + } + } + + // parse the possible tcp ports + let port = cli_args + .value_of("port") + .expect("--port has a default value") + .parse::<u16>() + .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; + let port6 = cli_args + .value_of("port6") + .map(str::parse::<u16>) + .transpose() + .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? + .unwrap_or(9090); + + // parse the possible udp ports + let maybe_udp_port = cli_args + .value_of("discovery-port") + .map(str::parse::<u16>) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --discovery-port as an integer: {parse_error}") + })?; + let maybe_udp6_port = cli_args + .value_of("discovery-port6") + .map(str::parse::<u16>) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --discovery-port6 as an integer: {parse_error}") + })?; + + // Now put everything together + let listening_addresses = match (maybe_ipv4, maybe_ipv6) { + (None, None) => { + // This should never happen unless clap is broken + return Err("No listening addresses provided".into()); + } + (None, Some(ipv6)) => { + // A single ipv6 address was provided. Set the ports + + if cli_args.is_present("port6") { + warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored.") + } + // use zero ports if required. If not, use the given port. + let tcp_port = use_zero_ports + .then(unused_port::unused_tcp6_port) + .transpose()? + .unwrap_or(port); + + if maybe_udp6_port.is_some() { + warn!(log, "When listening only over IPv6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") + } + // use zero ports if required. If not, use the specific udp port.
If none given, use + // the tcp port. + let udp_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(port); + + ListenAddress::V6(lighthouse_network::ListenAddr { + addr: ipv6, + udp_port, + tcp_port, + }) + } + (Some(ipv4), None) => { + // A single ipv4 address was provided. Set the ports + + // use zero ports if required. If not, use the given port. + let tcp_port = use_zero_ports + .then(unused_port::unused_tcp4_port) + .transpose()? + .unwrap_or(port); + // use zero ports if required. If not, use the specific udp port. If none given, use + // the tcp port. + let udp_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(port); + ListenAddress::V4(lighthouse_network::ListenAddr { + addr: ipv4, + udp_port, + tcp_port, + }) + } + (Some(ipv4), Some(ipv6)) => { + let ipv4_tcp_port = use_zero_ports + .then(unused_port::unused_tcp4_port) + .transpose()? + .unwrap_or(port); + let ipv4_udp_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(ipv4_tcp_port); + + // Defaults to 9090 when required + let ipv6_tcp_port = use_zero_ports + .then(unused_port::unused_tcp6_port) + .transpose()? + .unwrap_or(port6); + let ipv6_udp_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? + .or(maybe_udp6_port) + .unwrap_or(ipv6_tcp_port); + ListenAddress::DualStack( + lighthouse_network::ListenAddr { + addr: ipv4, + udp_port: ipv4_udp_port, + tcp_port: ipv4_tcp_port, + }, + lighthouse_network::ListenAddr { + addr: ipv6, + udp_port: ipv6_udp_port, + tcp_port: ipv6_tcp_port, + }, + ) + } + }; + + Ok(listening_addresses) +} + +/// Sets the network config from the command line arguments. pub fn set_network_config( config: &mut NetworkConfig, cli_args: &ArgMatches, data_dir: &Path, log: &Logger, - use_listening_port_as_enr_port_by_default: bool, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. if let Some(dir) = cli_args.value_of("network-dir") { @@ -788,32 +977,15 @@ pub fn set_network_config( config.shutdown_after_sync = true; } - if let Some(listen_address_str) = cli_args.value_of("listen-address") { - let listen_address = listen_address_str - .parse() - .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; - config.listen_address = listen_address; - } + config.set_listening_addr(parse_listening_addresses(cli_args, log)?); + // A custom target-peers command will overwrite the --proposer-only default. 
if let Some(target_peers_str) = cli_args.value_of("target-peers") { config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; - } - - if let Some(port_str) = cli_args.value_of("port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - config.libp2p_port = port; - config.discovery_port = port; - } - - if let Some(port_str) = cli_args.value_of("discovery-port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - config.discovery_port = port; + } else { + config.target_peers = 80; // default value } if let Some(value) = cli_args.value_of("network-load") { @@ -859,6 +1031,10 @@ pub fn set_network_config( .collect::, _>>()?; } + if cli_args.is_present("disable-peer-scoring") { + config.disable_peer_scoring = true; + } + if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') @@ -871,7 +1047,7 @@ pub fn set_network_config( } if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { - config.enr_udp_port = Some( + config.enr_udp4_port = Some( enr_udp_port_str .parse::() .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, @@ -879,7 +1055,23 @@ pub fn set_network_config( } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { - config.enr_tcp_port = Some( + config.enr_tcp4_port = Some( + enr_tcp_port_str + .parse::() + .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, + ); + } + + if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { + config.enr_udp6_port = Some( + enr_udp_port_str + .parse::() + .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { + config.enr_tcp6_port = Some( enr_tcp_port_str .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, @@ -887,58 +1079,106 @@ pub fn set_network_config( } if cli_args.is_present("enr-match") { + // Match the Ip and UDP port in the enr. + // set the enr address to localhost if the address is unspecified - if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) { - config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); - } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) { - config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST)); - } else { - config.enr_address = Some(config.listen_address); + if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { + let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED { + Ipv4Addr::LOCALHOST + } else { + ipv4_addr.addr + }; + config.enr_address.0 = Some(ipv4_enr_addr); + config.enr_udp4_port = Some(ipv4_addr.udp_port); } - config.enr_udp_port = Some(config.discovery_port); - } - - if let Some(enr_address) = cli_args.value_of("enr-address") { - let resolved_addr = match enr_address.parse::() { - Ok(addr) => addr, // // Input is an IpAddr - Err(_) => { - let mut addr = enr_address.to_string(); - // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. - // Since enr-update is disabled with a dns address, not setting the enr-udp-port - // will make the node undiscoverable. 
- if let Some(enr_udp_port) = - config - .enr_udp_port - .or(if use_listening_port_as_enr_port_by_default { - Some(config.discovery_port) - } else { - None - }) - { - write!(addr, ":{}", enr_udp_port) - .map_err(|e| format!("Failed to write enr address {}", e))?; - } else { - return Err( - "enr-udp-port must be set for node to be discoverable with dns address" - .into(), - ); + + if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() { + let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { + Ipv6Addr::LOCALHOST + } else { + ipv6_addr.addr + }; + config.enr_address.1 = Some(ipv6_enr_addr); + config.enr_udp6_port = Some(ipv6_addr.udp_port); + } + } + + if let Some(enr_addresses) = cli_args.values_of("enr-address") { + let mut enr_ip4 = None; + let mut enr_ip6 = None; + let mut resolved_enr_ip4 = None; + let mut resolved_enr_ip6 = None; + + for addr in enr_addresses { + match addr.parse::() { + Ok(IpAddr::V4(v4_addr)) => { + if let Some(used) = enr_ip4.as_ref() { + warn!(log, "More than one Ipv4 ENR address provided"; "used" => %used, "ignored" => %v4_addr) + } else { + enr_ip4 = Some(v4_addr) + } + } + Ok(IpAddr::V6(v6_addr)) => { + if let Some(used) = enr_ip6.as_ref() { + warn!(log, "More than one Ipv6 ENR address provided"; "used" => %used, "ignored" => %v6_addr) + } else { + enr_ip6 = Some(v6_addr) + } + } + Err(_) => { + // Try to resolve the address + + // NOTE: From checking the `to_socket_addrs` code I don't think the port + // actually matters. Just use the udp port. + + let port = match config.listen_addrs() { + ListenAddress::V4(v4_addr) => v4_addr.udp_port, + ListenAddress::V6(v6_addr) => v6_addr.udp_port, + ListenAddress::DualStack(v4_addr, _v6_addr) => { + // NOTE: slight preference for ipv4 that I don't think is of importance. + v4_addr.udp_port + } + }; + + let addr_str = format!("{addr}:{port}"); + match addr_str.to_socket_addrs() { + Err(_e) => { + return Err(format!("Failed to parse or resolve address {addr}.")) + } + Ok(resolved_addresses) => { + for socket_addr in resolved_addresses { + // Use the first ipv4 and first ipv6 addresses present. + + // NOTE: this means that if two dns addresses are provided, we + // might end up using the ipv4 and ipv6 resolved addresses of just + // the first. + match socket_addr.ip() { + IpAddr::V4(v4_addr) => { + if resolved_enr_ip4.is_none() { + resolved_enr_ip4 = Some(v4_addr) + } + } + IpAddr::V6(v6_addr) => { + if resolved_enr_ip6.is_none() { + resolved_enr_ip6 = Some(v6_addr) + } + } + } + } + } + } } - // `to_socket_addr()` does the dns resolution - // Note: `to_socket_addrs()` is a blocking call - let resolved_addr = if let Ok(mut resolved_addrs) = addr.to_socket_addrs() { - // Pick the first ip from the list of resolved addresses - resolved_addrs - .next() - .map(|a| a.ip()) - .ok_or("Resolved dns addr contains no entries")? 
- } else { - return Err(format!("Failed to parse enr-address: {}", enr_address)); - }; - config.discv5_config.enr_update = false; - resolved_addr } - }; - config.enr_address = Some(resolved_addr); + } + + // The ENR addresses given as IPs should take precedence over any resolved address + let used_host_resolution = resolved_enr_ip4.is_some() || resolved_enr_ip6.is_some(); + let ip4 = enr_ip4.or(resolved_enr_ip4); + let ip6 = enr_ip6.or(resolved_enr_ip6); + config.enr_address = (ip4, ip6); + if used_host_resolution { + config.discv5_config.enr_update = false; + } } if cli_args.is_present("disable-enr-auto-update") { @@ -981,6 +1221,20 @@ pub fn set_network_config( config.outbound_rate_limiter_config = Some(Default::default()); } + // Proposer-only mode overrides a number of previous configuration parameters. + // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set + // of peers. + if cli_args.is_present("proposer-only") { + config.subscribe_all_subnets = false; + + if cli_args.value_of("target-peers").is_none() { + // If a custom value is not set, change the default to 15 + config.target_peers = 15; + } + config.proposer_only = true; + warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7ec2af9f9db..a1c65bd26dd 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -13,8 +13,8 @@ db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 3255006b550..02608f9a0bd 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1741,7 +1741,7 @@ fn no_state_root_iter() -> Option impl<E, Hot, Cold> HotColdDB<E, Hot, Cold> where E: EthSpec, - Hot: KeyValueStore<E> + ItemStore<E>, - Cold: KeyValueStore<E> + ItemStore<E>, + Hot: ItemStore<E>, + Cold: ItemStore<E>, { pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> { let mut anchor = if let Some(anchor) = self.get_anchor_info() { diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7def1821dd2..ba234632d72 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,6 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) * [Docker](./docker.md) * [Build from Source](./installation-source.md) @@ -35,6 +34,7 @@ * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) * [Installation](./ui-installation.md) + * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) @@ -42,6 +42,7 @@ * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) * [Validator Graffiti](./graffiti.md) + * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) diff --git a/book/src/advanced-proposer-only.md
b/book/src/advanced-proposer-only.md new file mode 100644 index 00000000000..c3347e044b7 --- /dev/null +++ b/book/src/advanced-proposer-only.md @@ -0,0 +1,71 @@ +# Advanced Proposer-Only Beacon Nodes + +Lighthouse allows for more exotic setups that can minimize attack vectors by +adding redundant beacon nodes and dividing the roles of attesting and block +production between them. + +The purpose of this is to minimize attack vectors +where malicious users obtain the network identities (IP addresses) of beacon +nodes corresponding to individual validators and subsequently perform Denial of Service +attacks on the beacon nodes when they are due to produce a block on the +network. By splitting the duties of attestation and block production across +different beacon nodes, an attacker may not know which node is the block +production node, especially if the user rotates IP addresses of the block +production beacon node in between block proposals (this is infrequent on +networks with large validator counts). + +## The Beacon Node + +A Lighthouse beacon node can be configured with the `--proposer-only` flag +(i.e. `lighthouse bn --proposer-only`). +Setting a beacon node with this flag will limit its use as a beacon node for +normal activities such as performing attestations, but it will make the node +harder to identify as a potential node to attack and will also consume fewer +resources. + +Specifically, this flag reduces the default peer count (to a safe minimal +number, as maintaining peers on attestation subnets does not need to be considered), +and prevents the node from subscribing to any attestation subnets or +sync committees, which is a primary way for attackers to de-anonymize +validators. + +> Note: Beacon nodes that have set the `--proposer-only` flag should not be connected +> to validator clients unless via the `--proposer-nodes` flag. If connected as a +> normal beacon node, the validator may fail to handle its duties correctly, +> resulting in a loss of income. + + +## The Validator Client + +The validator client can be given a list of HTTP API endpoints representing +beacon nodes that will be solely used for block propagation on the network, via +the CLI flag `--proposer-nodes`. These nodes can be any working beacon nodes +and do not specifically have to be proposer-only beacon nodes that have been +run with the `--proposer-only` flag (although we do recommend this flag for +these nodes for added security). + +> Note: The validator client still requires at least one other beacon node to +> perform its duties, which must be specified via the usual `--beacon-nodes` flag. + +> Note: The validator client will attempt to get a block to propose from the +> beacon nodes specified in `--beacon-nodes` before trying `--proposer-nodes`. +> This is because the nodes subscribed to subnets have a higher chance of +> producing a more profitable block. Any block builders should therefore be +> attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`. + + +## Setup Overview + +The intended setup to take advantage of this mechanism is to run one (or more) +normal beacon nodes in conjunction with one (or more) proposer-only beacon +nodes. See the [Redundancy](./redundancy.md) section for more information about +setting up redundant beacon nodes. The proposer-only beacon nodes should be +set up to use a different IP address than the primary (non-proposer-only) nodes.
+For added security, the IP addresses of the proposer-only nodes should be +rotated occasionally such that a new IP address is used per block proposal. + +A single validator client can then connect to all of the above nodes via the +`--beacon-nodes` and `--proposer-nodes` flags. The resulting setup will allow +the validator client to perform its regular duties on the standard beacon nodes +and, when the time comes to propose a block, it will send this block via the +specified proposer-only nodes. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index fb7f07a51a6..08d276ba356 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -41,7 +41,7 @@ drastically and use the (recommended) default. ### NAT Traversal (Port Forwarding) -Lighthouse, by default, used port 9000 for both TCP and UDP. Lighthouse will +Lighthouse, by default, uses port 9000 for both TCP and UDP. Lighthouse will still function if it is behind a NAT without any port mappings. Although Lighthouse still functions, we recommend that some mechanism is used to ensure that your Lighthouse node is publicly accessible. This will typically improve @@ -54,6 +54,16 @@ node will inform you of established routes in this case). If UPnP is not enabled, we recommend you manually set up port mappings to both of Lighthouse's TCP and UDP ports (9000 by default). +> Note: Lighthouse needs to advertise its publicly accessible ports in +> order to inform its peers that it is contactable and how to connect to it. +> Lighthouse has an automated way of doing this for the UDP port. This means +> Lighthouse can detect its external UDP port. There is no such mechanism for the +> TCP port. As such, we assume that the external UDP and external TCP ports are the +> same (i.e. external 5050 UDP/TCP mapping to internal 9000 is fine). If you are setting up differing external UDP and TCP ports, you should +> explicitly specify them using the `--enr-tcp-port` and `--enr-udp-port` flags as +> explained in the following section.
Obtain the latest image with: ```bash -$ docker pull sigp/lighthouse +docker pull sigp/lighthouse ``` Download and test the image with: ```bash -$ docker run sigp/lighthouse lighthouse --version +docker run sigp/lighthouse lighthouse --version ``` If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version (see example below), then you've successfully installed Lighthouse via Docker. -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Example Version Output ``` @@ -38,6 +35,9 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` +> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker +> Images](#available-docker-images) below. + ### Available Docker Images There are several images available on Docker Hub. @@ -47,11 +47,10 @@ Lighthouse with optimizations enabled. If you are running on older hardware then `latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware compatibility (see [Portability](./installation-binaries.md#portability)). -To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands -like so: +To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: ``` -$ docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: @@ -65,17 +64,17 @@ The `version` is: * `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1` * `latest` for the `stable` branch (latest release) or `unstable` branch -The `stability` is: - -* `-unstable` for the `unstable` branch -* empty for a tagged release or the `stable` branch - The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD * `-arm64` for aarch64, e.g. Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) +The `stability` is: + +* `-unstable` for the `unstable` branch +* empty for a tagged release or the `stable` branch + The `modernity` is: * `-modern` for optimized builds @@ -99,13 +98,13 @@ To build the image from source, navigate to the root of the repository and run: ```bash -$ docker build . -t lighthouse:local +docker build . -t lighthouse:local ``` The build will likely take several minutes. Once it's built, test it with: ```bash -$ docker run lighthouse:local lighthouse --help +docker run lighthouse:local lighthouse --help ``` ## Using the Docker image @@ -113,12 +112,12 @@ $ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` -> To join the Prater testnet, use `--network prater` instead. +> To join the Goerli testnet, use `--network goerli` instead. -> The `-p` and `-v` and values are described below. +> The `-v` (Volumes) and `-p` (Ports) values are described below.
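For example, the same beacon node can be pointed at the Goerli testnet by changing only the `--network` flag (a sketch based on the mainnet command above; port mappings and the volume are unchanged):

```bash
# Identical to the mainnet example above, except for --network goerli.
docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 \
  -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse \
  lighthouse --network goerli beacon --http --http-address 0.0.0.0
```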
### Volumes @@ -131,7 +130,7 @@ The following example runs a beacon node with the data directory mapped to the users home directory: ```bash -$ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon +docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon ``` ### Ports @@ -140,14 +139,14 @@ In order to be a good peer and serve other peers you should expose port `9000` for both TCP and UDP. Use the `-p` flag to do this: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon +docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p 127.0.0.1:5052:5052`. ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/faq.md b/book/src/faq.md index 5bfae3fa875..b42e197a003 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -9,6 +9,11 @@ - [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators?](#how-can-i-monitor-my-validators) +- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do) +- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials) +- [I am missing attestations. Why?](#i-am-missing-attestations-why) +- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-penalty-is-this-normal) +- [My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-blocks-using-checkpoint-sync-what-can-i-do) ### Why does it take so long for a validator to be activated? @@ -128,8 +133,9 @@ same `datadir` as a previous network. I.e if you have been running the `datadir` (the `datadir` is also printed out in the beacon node's logs on boot-up). -If you find yourself with a low peer count and is not reaching the target you -expect. Try setting up the correct port forwards as described [here](./advanced_networking.md#nat-traversal-port-forwarding). +If you find yourself with a low peer count and it is not reaching the target you +expect, try setting up the correct port forwards as described +[here](./advanced_networking.md#nat-traversal-port-forwarding). ### What should I do if I lose my slashing protection database? @@ -184,4 +190,47 @@ However, there are some components which can be configured with redundancy. See Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator -Monitoring](./validator-monitoring.md) for more information. +Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md). + +### I see beacon logs showing `WARN: Execution engine called failed`, what should I do?
+ +The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: + +`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` + +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur: +1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. +1. The computer is overloaded. Check the CPU and RAM usage to see if the machine is overloaded. You can use `htop` to check CPU and RAM usage. +1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync with the network and you may want to consider upgrading to a better SSD. + +If the error is caused by reason 1 above, you may want to investigate further. If the execution engine suddenly goes out of sync, it is usually caused by an ungraceful shutdown. The common causes for an ungraceful shutdown are: +- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. +- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. +- Out-of-memory (OOM) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to OOM. If you are using geth as the execution client, a short-term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048`, and (2) connect to fewer peers using the flag `--maxpeers 10`. If OOM occurs rather frequently, a long-term solution is to increase the memory capacity of the computer. + + +### How do I check or update my withdrawal credentials? +Withdrawals will be available after the Capella/Shanghai upgrades on 12th April 2023. To check if you are eligible for withdrawals, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: +- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set. +- `withdrawals not enabled` means you will need to update your withdrawal credentials from `0x00` type to `0x01` type. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). + +For the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that.
The catch is that as long as you do not update your withdrawal credentials, your rewards will continue to be locked in the beacon chain. Only after you update the withdrawal credentials will the rewards be withdrawn to the withdrawal address. + + +### I am missing attestations. Why? +The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that are causing the missed attestations. Check the setup to ensure that: +- the clock is synced +- the computer has sufficient resources and is not overloaded +- the internet is working well +- you have sufficient peers + +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missed attestations should be a rare occurrence. + +### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? + +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch: if the proposer of that slot releases the block late, you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. + + +### My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do? + +Check the number of peers you are connected to. If you have few peers (fewer than 50), try setting up port forwarding on port 9000 TCP/UDP to increase the peer count. \ No newline at end of file diff --git a/book/src/imgs/ui-autoconnect-auth.png b/book/src/imgs/ui-autoconnect-auth.png new file mode 100644 index 00000000000..4121f56cabd Binary files /dev/null and b/book/src/imgs/ui-autoconnect-auth.png differ diff --git a/book/src/imgs/ui-exit.png b/book/src/imgs/ui-exit.png new file mode 100644 index 00000000000..7061fab388b Binary files /dev/null and b/book/src/imgs/ui-exit.png differ diff --git a/book/src/imgs/ui-fail-auth.png b/book/src/imgs/ui-fail-auth.png new file mode 100644 index 00000000000..dece7b707a5 Binary files /dev/null and b/book/src/imgs/ui-fail-auth.png differ diff --git a/book/src/imgs/ui-session-auth.png b/book/src/imgs/ui-session-auth.png new file mode 100644 index 00000000000..c66b92af743 Binary files /dev/null and b/book/src/imgs/ui-session-auth.png differ diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 2365ea7ed7b..30bf03e14ee 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -23,21 +23,24 @@ For details, see [Portability](#portability). ## Usage Each binary is contained in a `.tar.gz` archive. For this example, let's assume the user needs -a portable `x86_64` binary. +an `x86_64` binary. ### Steps 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release. -1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary. -1. Extract the archive: - 1. `cd Downloads` - 1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` +1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary.
For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a Linux terminal: + ```bash + cd ~ + curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + ``` 1. Test the binary with `./lighthouse --version` (it should print the version). -1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. - - E.g., `cp lighthouse /usr/bin` +1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `/usr/bin`, run `sudo cp lighthouse /usr/bin`. + + +> Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -64,4 +67,4 @@ WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get On some VPS providers, the virtualization can make it appear as if CPU features are not available, even when they are. In this case you might see the warning above, but so long as the client -continues to function it's nothing to worry about. +continues to function, it's nothing to worry about. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index c89dd1add4f..b9c9df163d8 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -5,8 +5,20 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way -to update the Rust compiler, and works on all platforms. +First, **install Rust** using [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +The rustup installer provides an easy way to update the Rust compiler, and works on all platforms. + +> Tips: +> +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version`. If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. With Rust installed, follow the instructions below to install dependencies relevant to your operating system. @@ -19,10 +31,17 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` +> Tips: +> +> - If there are difficulties, try updating the package manager with `sudo apt +> update`. + > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories > of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: > [https://apt.kitware.com/](https://apt.kitware.com) +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. Install the [Homebrew][] package manager. @@ -39,10 +58,19 @@ brew install protobuf [Homebrew]: https://brew.sh/ +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### Windows -1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +1. Install [Git](https://git-scm.com/download/win).
1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. + > Tips: + > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. + > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash + Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. Install Make, CMake, LLVM and protoc using Chocolatey: ``` @@ -67,6 +95,8 @@ should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)]( [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +After this, you are ready to [build Lighthouse](#build-lighthouse). + ## Build Lighthouse Once you have Rust and the build dependencies you're ready to build Lighthouse: @@ -136,7 +166,7 @@ Commonly used features include: * `spec-minimal`: support for the minimal preset (useful for testing). Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` -argument for `cargo`, which can plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. ``` @@ -171,12 +201,11 @@ PROFILE=maxperf make Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory needs to be on your `PATH` before you can run `$ lighthouse`. -See ["Configuring the `PATH` environment variable" -(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information. +See ["Configuring the `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information. ### Compilation error -Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply run `rustup update`. If you can't install the latest version of Rust you can instead compile using the Minimum Supported Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's @@ -185,7 +214,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built -binary](./installation-binaries.md). +binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. diff --git a/book/src/installation.md b/book/src/installation.md index bc546e09874..627326d2a4a 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,24 +8,27 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -Community-maintained additional installation methods: +Additionally, there are two extra guides for specific uses: + +- [Raspberry Pi 4 guide](./pi.md). 
+- [Cross-compiling guide for developers](./cross-compiling.md). + +There are also community-maintained installation methods: - [Homebrew package](./homebrew.md). - Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). -Additionally, there are two extra guides for specific uses: -- [Raspberry Pi 4 guide](./pi.md). -- [Cross-compiling guide for developers](./cross-compiling.md). -## Minimum System Requirements +## Recommended System Requirements + +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The system requirements listed below are therefore for running a Lighthouse beacon node combined with an execution client, and a validator client with a modest number of validator keys (less than 100): -For more information see [System Requirements](./system-requirements.md). -[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +* Memory: 16 GB RAM or more +* Storage: 2 TB solid state storage +* Network: 100 Mb/s download, 20 Mb/s upload broadband connection diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 0014af8f152..fc4530589d9 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -14,6 +14,15 @@ There are three flags which control the re-orging behaviour: * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. +* `--proposer-reorg-cutoff T`: only attempt to re-org late blocks when the proposal is being made + before T milliseconds into the slot. Delays between the validator client and the beacon node can + cause some blocks to be requested later than the start of the slot, which makes them more likely + to fail. The default cutoff is 1000ms on mainnet, which gives blocks 3000ms to be signed and + propagated before the attestation deadline at 4000ms. +* `--proposer-reorg-disallowed-offsets N1,N2,N3...`: Prohibit Lighthouse from attempting to reorg at + specific offsets in each epoch. A disallowed offset `N` prevents reorging blocks from being + proposed at any `slot` such that `slot % SLOTS_PER_EPOCH == N`. The value of this flag is a + comma-separated list of integer offsets. All flags should be applied to `lighthouse bn`.
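As an illustrative sketch only (the values shown are examples, not recommendations), the flags described above can be combined in a single invocation:

```bash
# Illustrative values: 20% threshold and 2 epochs match the stated defaults,
# a 500ms cutoff is stricter than the 1000ms default, and offsets 0,1 extend
# the default behaviour of avoiding re-orgs only at offset 0.
lighthouse bn \
  --proposer-reorg-threshold 20 \
  --proposer-reorg-epochs-since-finalization 2 \
  --proposer-reorg-cutoff 500 \
  --proposer-reorg-disallowed-offsets 0,1
```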
The default configuration is recommended as it balances the chance of the re-org succeeding against the chance of failure due to attestations diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 225f293f978..4182314da12 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -11,7 +11,7 @@ _Documentation for Siren users and developers._ Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and a Lighthouse Validator Client to monitor performance and display key validator -metrics. +metrics. The UI is currently in active development. It resides in the [Siren](https://github.com/sigp/siren) repository. @@ -24,7 +24,8 @@ information: - [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to set up and configure Siren. -- [Usage](./ui-usage.md) - Details various Siren components. +- [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. +- [Usage](./ui-usage.md) - Details various Siren components. - [FAQs](./ui-faqs.md) - Frequently Asked Questions. ## Contributing diff --git a/book/src/pi.md b/book/src/pi.md index 24796d394e3..d8d154d765a 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -12,18 +12,18 @@ desktop) may be convenient.* ### 1. Install Ubuntu -Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). - -**A 64-bit version is required** and latest version is recommended (Ubuntu -20.04 LTS was the latest at the time of writing). +Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required.** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation-source.md#ubuntu). -(I.e., run the `sudo apt install ...` command at that link). +Install the Ubuntu dependencies: + +```bash +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +``` > Tips: > @@ -32,15 +32,18 @@ Install the [Ubuntu Dependencies](installation-source.md#ubuntu). ### 3. Install Rust -Install Rust as per [rustup](https://rustup.rs/). (I.e., run the `curl ... ` -command). +Install Rust as per [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` > Tips: > -> - When prompted, enter `1` for the default installation. -> - Try running `cargo version` after Rust installation completes. If it cannot -> be found, run `source $HOME/.cargo/env`. -> - It's generally advised to append `source $HOME/.cargo/env` to `~/.bashrc`. +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version`. If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. ### 4.
Install Lighthouse diff --git a/book/src/system-requirements.md b/book/src/system-requirements.md deleted file mode 100644 index 0c51d07cce8..00000000000 --- a/book/src/system-requirements.md +++ /dev/null @@ -1,23 +0,0 @@ -# System Requirements - -Lighthouse is able to run on most low to mid-range consumer hardware, but will perform best when -provided with ample system resources. The following system requirements are for running a beacon -node and a validator client with a modest number of validator keys (less than 100). - -## Minimum - -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection - -During smooth network conditions, Lighthouse's database will fit within 15 GB, but in case of a long -period of non-finality, it is **strongly recommended** that at least 128 GB is available. - -## Recommended - -* Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* 16 GB RAM -* 256 GB solid state storage -* 100 Mb/s download, 20 Mb/s upload broadband connection - diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md new file mode 100644 index 00000000000..0572824d5c6 --- /dev/null +++ b/book/src/ui-authentication.md @@ -0,0 +1,33 @@ +# Authentication + +To enhance the security of your account, we offer the option to set a session password. This allows the user to avoid re-entering the api-token when performing critical mutating operations on the validator. Instead, a user can simply enter their session password. In the absence of a session password, Siren will revert to the api-token specified in your configuration settings as the default security measure. + +> This does not protect your validators from unauthorized device access. + +![](imgs/ui-session-auth.png) + +Session passwords must contain at least: + +- 12 characters +- 1 lowercase letter +- 1 uppercase letter +- 1 number +- 1 special character + + +## Protected Actions + +Before executing any sensitive validator action, Siren will request authentication via the session password or api-token. + +![](imgs/ui-exit.png) + + +After three consecutive failed attempts, Siren will lock all actions and prompt for the configuration settings to be renewed before access to these features is restored. + +![](imgs/ui-fail-auth.png) + +## Auto Connect + +If auto-connect is enabled, refreshing the Siren application will prompt for the session password or api-token. If three consecutive authentication attempts fail, Siren will lock the session and prompt for the configuration settings to be reset to regain access. + +![](imgs/ui-autoconnect-auth.png) \ No newline at end of file diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index 240195421cb..51aa9385a44 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -1,13 +1,16 @@ # Frequently Asked Questions -## 1. Where can I find my API token? +## 1. Are there any requirements to run Siren? +Yes, Siren requires Lighthouse v3.5.1 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. + +## 2. Where can I find my API token? The required API token may be found in the default data directory of the validator client. For more information, please refer to the lighthouse ui configuration [`api token section`](./ui-configuration.md#api-token). -## 2. How do I fix the Node Network Errors? +## 3.
How do I fix the Node Network Errors? If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR, you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). -## 3. How do I change my Beacon or Validator address after logging in? +## 4. How do I change my Beacon or Validator address after logging in? Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. -## 4. Why doesn't my validator balance graph show any data? +## 5. Why doesn't my validator balance graph show any data? If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20 min. diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index 0b96b1923b9..b8ae788c69b 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -2,6 +2,8 @@ Siren runs on Linux, MacOS and Windows. +## Version Requirement +The Siren app requires Lighthouse v3.5.1 or higher to function properly. These versions can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## Pre-Built Electron Packages @@ -16,7 +18,7 @@ Simply download the package specific to your operating system and run it. ### Requirements -Building from source requires `Node v18` and `yarn`. +Building from source requires `Node v18` and `yarn`. ### Building From Source diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 5056040e4c6..591b7d49109 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -12,12 +12,9 @@ This number can be much higher depending on how many other validators are queued. ## Withdrawal of exited funds -Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**. -This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella). - -To understand the rollout strategy for Ethereum upgrades, please visit . - +In order to be eligible for fund withdrawal, the validator must have set a withdrawal address and fully exited the network. +For more information on how fund withdrawal works, please visit ## Initiating a voluntary exit @@ -55,8 +52,6 @@ Publishing a voluntary exit for validator 0xabcd WARNING: WARNING: THIS IS AN IRREVERSIBLE OPERATION -WARNING: WITHDRAWING STAKED ETH WILL NOT BE POSSIBLE UNTIL ETH1/ETH2 MERGE. - PLEASE VISIT https://lighthouse-book.sigmaprime.io/voluntary-exit.html TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.
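The new withdrawal paragraph above states the eligibility rule in prose; a rough sketch of the same check follows, assuming simplified stand-in types. The field names and the `0x01` credential prefix convention come from the consensus spec rather than this diff, this is not Lighthouse's real `Validator` type, and the real rule also considers the validator's balance.

```rust
/// Simplified sketch of withdrawal eligibility; not Lighthouse's
/// actual `Validator` type.
const ETH1_ADDRESS_WITHDRAWAL_PREFIX: u8 = 0x01;

struct Validator {
    withdrawal_credentials: [u8; 32],
    withdrawable_epoch: u64,
}

fn eligible_for_full_withdrawal(v: &Validator, current_epoch: u64) -> bool {
    // A withdrawal address is signalled by 0x01 withdrawal credentials,
    // and the validator must have fully exited the network (i.e. have
    // reached its withdrawable epoch).
    v.withdrawal_credentials[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
        && current_epoch >= v.withdrawable_epoch
}

fn main() {
    let mut credentials = [0u8; 32];
    credentials[0] = ETH1_ADDRESS_WITHDRAWAL_PREFIX;
    let v = Validator { withdrawal_credentials: credentials, withdrawable_epoch: 100 };
    assert!(eligible_for_full_withdrawal(&v, 101));
    assert!(!eligible_for_full_withdrawal(&v, 99));
}
```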
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 266dcdfe6f4..7eb37a9b94b 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "3.5.1" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" @@ -10,7 +10,7 @@ clap = "2.33.3" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } types = { path = "../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" slog = "2.5.2" tokio = "1.14.0" log = "0.4.11" diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index 9a37320028b..c3d7ac48a98 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -53,6 +53,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("network-dir") ) + .arg( + Arg::with_name("enr-udp6-port") + .long("enr-udp6-port") + .value_name("PORT") + .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") + .takes_value(true), + ) .arg( Arg::with_name("enable-enr-auto-update") .short("x") diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index b7a66cbbd84..d3ee58a9077 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,7 +1,7 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; -use lighthouse_network::discv5::enr::EnrBuilder; +use lighthouse_network::discovery::create_enr_builder_from_config; use lighthouse_network::discv5::IpMode; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ @@ -57,12 +57,24 @@ impl BootNodeConfig { let logger = slog_scope::logger(); - set_network_config(&mut network_config, matches, &data_dir, &logger, true)?; + set_network_config(&mut network_config, matches, &data_dir, &logger)?; - // Set the enr-udp-port to the default listening port if it was not specified. - if !matches.is_present("enr-udp-port") { - network_config.enr_udp_port = Some(network_config.discovery_port); - } + // Set the Enr UDP ports to the listening ports if not present. + if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { + network_config.enr_udp4_port = Some( + network_config + .enr_udp4_port + .unwrap_or(listening_addr_v4.udp_port), + ) + }; + + if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { + network_config.enr_udp6_port = Some( + network_config + .enr_udp6_port + .unwrap_or(listening_addr_v6.udp_port), + ) + }; // By default this is enabled. If it is not set, revert to false. if !matches.is_present("enable-enr-auto-update") { @@ -70,17 +82,29 @@ impl BootNodeConfig { } // the address to listen on - let listen_socket = - SocketAddr::new(network_config.listen_address, network_config.discovery_port); - if listen_socket.is_ipv6() { - // create ipv6 sockets and enable ipv4 mapped addresses. - network_config.discv5_config.ip_mode = IpMode::Ip6 { - enable_mapped_addresses: true, - }; - } else { - // Set explicitly as ipv4 otherwise - network_config.discv5_config.ip_mode = IpMode::Ip4; - } + let listen_socket = match network_config.listen_addrs().clone() { + lighthouse_network::ListenAddress::V4(v4_addr) => { + // Set explicitly as ipv4 otherwise + network_config.discv5_config.ip_mode = IpMode::Ip4; + v4_addr.udp_socket_addr() + } + lighthouse_network::ListenAddress::V6(v6_addr) => { + // create ipv6 sockets without ipv4 mapped addresses.
+ network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: false, + }; + + v6_addr.udp_socket_addr() + } + lighthouse_network::ListenAddress::DualStack(_v4_addr, v6_addr) => { + // create ipv6 sockets and enable ipv4 mapped addresses. + network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: true, + }; + + v6_addr.udp_socket_addr() + } + }; let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(&private_key)?; @@ -115,30 +139,8 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let mut builder = EnrBuilder::new("v4"); - // Set the enr address if specified. Set also the port. - // NOTE: if the port is specified but the the address is not, the port won't be - // set since it can't be known if it's an ipv6 or ipv4 udp port. - if let Some(enr_address) = network_config.enr_address { - match enr_address { - std::net::IpAddr::V4(ipv4_addr) => { - builder.ip4(ipv4_addr); - if let Some(port) = network_config.enr_udp_port { - builder.udp4(port); - } - } - std::net::IpAddr::V6(ipv6_addr) => { - builder.ip6(ipv6_addr); - if let Some(port) = network_config.enr_udp_port { - builder.udp6(port); - // We are enabling mapped addresses in the boot node in this case, - // so advertise an udp4 port as well. - builder.udp4(port); - } - } - } - }; - + let enable_tcp = false; + let mut builder = create_enr_builder_from_config(&network_config, enable_tcp); // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { builder.add_value("eth2", enr_fork_bytes.as_slice()); diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 8f38fb300dc..3f5419c2c68 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -44,7 +44,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server - let mut discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); + let mut discv5: Discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table for enr in boot_nodes { diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 62eb8aa3d5d..a882b7ce64b 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -11,7 +11,7 @@ clap = "2.33.3" hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" ethereum-types = "0.14.1" serde = "1.0.116" serde_json = "1.0.59" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7be0e8f3d27..aabc07fc524 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -14,6 +14,6 @@ hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" ethabi = "16.0.0" diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d838f..2c5e7060b2e 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -13,15 +13,15 @@ types = { path = "../../consensus/types" } reqwest = { version = "0.11.0", features = ["json","stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" 
eth2_keystore = { path = "../../crypto/eth2_keystore" } libsecp256k1 = "0.7.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4d74299fff2..e03cc2e9b02 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -22,13 +22,14 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -pub use sensitive_url::SensitiveUrl; +pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; +use store::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); @@ -338,7 +339,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -357,7 +358,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -376,7 +377,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -396,7 +397,8 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -426,7 +428,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -466,7 +468,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -499,7 +501,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -522,7 +524,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -547,7 +549,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -568,7 +570,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -595,7 +597,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; 
path.path_segments_mut() @@ -675,7 +677,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -691,8 +696,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blinded_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> - { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blinded_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -760,7 +767,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -779,7 +786,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1267,28 +1274,12 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } - /// `GET v1/debug/beacon/states/{state_id}` (LEGACY) - pub async fn get_debug_beacon_states_v1( - &self, - state_id: StateId, - ) -> Result>>, Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("debug") - .push("beacon") - .push("states") - .push(&state_id.to_string()); - - self.get_opt(path).await - } - /// `GET debug/beacon/states/{state_id}` /// `-H "accept: application/octet-stream"` pub async fn get_debug_beacon_states_ssz( @@ -1334,6 +1325,18 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/debug/fork_choice` + pub async fn get_debug_fork_choice(&self) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("fork_choice"); + + self.get(path).await + } + /// `GET validator/duties/proposer/{epoch}` pub async fn get_validator_duties_proposer( &self, @@ -1649,7 +1652,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e50d9f4dc09..bb933dbe121 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -13,7 +13,7 @@ use crate::{ BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; @@ -566,4 +566,73 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } + + /// + /// Analysis endpoints. 
+ /// + + /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot + pub async fn get_lighthouse_analysis_block_rewards( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_rewards"); + + path.query_pairs_mut() + .append_pair("start_slot", &start_slot.to_string()) + .append_pair("end_slot", &end_slot.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_block_packing( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_packing_efficiency"); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_attestation_performance( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + target: String, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("attestation_performance") + .push(&target); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } } diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index 314ffb85121..bebd1c661b3 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -6,32 +6,32 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct IdealAttestationRewards { // Validator's effective balance in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, // Ideal attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // Ideal attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target: u64, // Ideal attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub source: u64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct TotalAttestationRewards { // one entry for every validator based on their attestations in the epoch - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = 
"serde_utils::quoted_i64")] pub source: i64, // TBD attester's inclusion_delay reward in gwei (phase0 only) // pub inclusion_delay: u64, diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs index 502577500d9..15fcdc60667 100644 --- a/common/eth2/src/lighthouse/standard_block_rewards.rs +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -5,22 +5,22 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct StandardBlockReward { // proposer of the block, the proposer index who receives these rewards - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, // total block reward in gwei, // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub total: u64, // block reward component due to included attestations in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attestations: u64, // block reward component due to included sync_aggregate in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_aggregate: u64, // block reward component due to included proposer_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_slashings: u64, // block reward component due to included attester_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attester_slashings: u64, } diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index e215d8e3e0b..66a721dc229 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -5,9 +5,9 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct SyncCommitteeReward { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 88b5b684019..e576cfcb363 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -57,7 +57,7 @@ pub fn parse_pubkey(secret: &str) -> Result, Error> { &secret[SECRET_PREFIX.len()..] 
}; - eth2_serde_utils::hex::decode(secret) + serde_utils::hex::decode(secret) .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) .and_then(|bytes| { if bytes.len() != PK_LEN { @@ -174,7 +174,7 @@ impl ValidatorClientHttpClient { let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); - eth2_serde_utils::hex::decode(&sig) + serde_utils::hex::decode(&sig) .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; @@ -642,6 +642,30 @@ impl ValidatorClientHttpClient { let url = self.make_gas_limit_url(pubkey)?; self.delete_with_raw_response(url, &()).await } + + /// `POST /eth/v1/validator/{pubkey}/voluntary_exit` + pub async fn post_validator_voluntary_exit( + &self, + pubkey: &PublicKeyBytes, + epoch: Option, + ) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("voluntary_exit"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.post(path, &()).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 887bcb99ea6..0d67df47a9a 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -13,7 +13,7 @@ pub struct GetFeeRecipientResponse { #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetGasLimitResponse { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } @@ -45,7 +45,7 @@ pub struct ImportKeystoresRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); +pub struct KeystoreJsonStr(#[serde(with = "serde_utils::json_str")] pub Keystore); impl std::ops::Deref for KeystoreJsonStr { type Target = Keystore; @@ -56,7 +56,7 @@ impl std::ops::Deref for KeystoreJsonStr { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); +pub struct InterchangeJsonStr(#[serde(with = "serde_utils::json_str")] pub Interchange); #[derive(Debug, Deserialize, Serialize)] pub struct ImportKeystoresResponse { @@ -103,7 +103,7 @@ pub struct DeleteKeystoresRequest { #[derive(Debug, Deserialize, Serialize)] pub struct DeleteKeystoresResponse { pub data: Vec>, - #[serde(with = "eth2_serde_utils::json_str")] + #[serde(with = "serde_utils::json_str")] pub slashing_protection: Interchange, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 92439337f61..dd2ed03221b 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -32,14 +32,14 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { pub mnemonic: ZeroizeString, - #[serde(with = "eth2_serde_utils::quoted_u32")] + #[serde(with = "serde_utils::quoted_u32")] pub 
key_derivation_path_offset: u32, pub validators: Vec, } @@ -62,7 +62,7 @@ pub struct CreatedValidator { #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, pub eth1_deposit_tx_data: String, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -141,6 +141,11 @@ pub struct UpdateFeeRecipientRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct UpdateGasLimitRequest { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } + +#[derive(Deserialize)] +pub struct VoluntaryExitQuery { + pub epoch: Option, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index b4218c361a3..5545bf45d78 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -82,10 +82,10 @@ impl std::fmt::Display for EndpointVersion { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct GenesisData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], } @@ -200,6 +200,14 @@ pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct GenericResponse { @@ -222,6 +230,18 @@ impl GenericResponse { data: self.data, } } + + pub fn add_execution_optimistic_finalized( + self, + execution_optimistic: bool, + finalized: bool, + ) -> ExecutionOptimisticFinalizedResponse { + ExecutionOptimisticFinalizedResponse { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + data: self.data, + } + } } #[derive(Debug, PartialEq, Clone, Serialize)] @@ -296,9 +316,9 @@ impl fmt::Display for ValidatorId { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, pub status: ValidatorStatus, pub validator: Validator, @@ -306,9 +326,9 @@ pub struct ValidatorData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorBalanceData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, } @@ -471,16 +491,16 @@ pub struct ValidatorsQuery { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncCommitteeByValidatorIndices { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, pub validator_aggregates: Vec, } @@ -493,7 +513,7 @@ pub struct RandaoMix { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 
#[serde(transparent)] pub struct SyncSubcommittee { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } @@ -518,7 +538,7 @@ pub struct BlockHeaderData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DepositContractData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub chain_id: u64, pub address: Address, } @@ -542,7 +562,7 @@ pub struct IdentityData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MetaData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, @@ -629,27 +649,27 @@ pub struct ValidatorBalancesQuery { #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); /// Borrowed variant of `ValidatorIndexData`, for serializing/sending. #[derive(Clone, Copy, Serialize)] #[serde(transparent)] pub struct ValidatorIndexDataRef<'a>( - #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], + #[serde(serialize_with = "serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], ); #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: CommitteeIndex, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_length: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_committee_index: u64, pub slot: Slot, } @@ -657,7 +677,7 @@ pub struct AttesterData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProposerData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub slot: Slot, } @@ -706,11 +726,11 @@ pub struct ValidatorAggregateAttestationQuery { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, pub slot: Slot, pub is_aggregator: bool, @@ -831,13 +851,13 @@ impl fmt::Display for PeerDirection { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PeerCount { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connecting: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnected: u64, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnecting: u64, } @@ -872,7 +892,7 @@ pub struct SseHead { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub depth: u64, pub old_head_block: Hash256, pub old_head_state: Hash256, @@ -905,7 +925,7 @@ pub struct SseLateHead { #[serde(untagged)] pub struct SsePayloadAttributes { #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, #[superstruct(getter(copy))] pub prev_randao: Hash256, @@ -918,9 +938,11 @@ pub struct SsePayloadAttributes { #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] pub struct SseExtendedPayloadAttributesGeneric { pub proposal_slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub parent_block_number: u64, pub parent_block_hash: ExecutionBlockHash, pub payload_attributes: T, } @@ -958,6 +980,7 @@ impl ForkVersionDeserialize for SseExtendedPayloadAttributes { proposal_slot: helper.proposal_slot, proposer_index: helper.proposer_index, parent_block_root: helper.parent_block_root, + parent_block_number: helper.parent_block_number, parent_block_hash: helper.parent_block_hash, payload_attributes: SsePayloadAttributes::deserialize_by_fork::( helper.payload_attributes, @@ -1182,18 +1205,38 @@ fn parse_accept(accept: &str) -> Result, String> { #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } #[derive(PartialEq, Debug, Serialize, Deserialize)] pub struct LivenessResponseData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub epoch: Epoch, pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ForkChoice { + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub fork_choice_nodes: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceNode { + pub slot: Slot, + pub block_root: Hash256, + pub parent_root: Option, + pub justified_epoch: Option, + pub finalized_epoch: Option, + #[serde(with = "serde_utils::quoted_u64")] + pub weight: u64, + pub validity: Option, + pub execution_block_hash: Option, +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5f577bedc3c..7a376568eb8 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] lazy_static = "1.4.0" num-bigint = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" serde_yaml = "0.8.13" serde = "1.0.116" diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index eb26f563e0d..7b5fa7a8e4f 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -20,7 +20,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; -use eth2_hashing::hash; +use ethereum_hashing::hash; use num_bigint::BigUint; use serde_derive::{Deserialize, Serialize}; 
use std::convert::TryInto; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 6199005552a..f8382c95d36 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -16,6 +16,6 @@ tempfile = "3.1.0" [dependencies] serde_yaml = "0.8.13" types = { path = "../../consensus/types"} -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} -enr = { version = "0.6.2", features = ["ed25519", "k256"] } +discv5 = "0.2.2" diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 9d9852f6275..0bbf873a3fb 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -38,7 +38,7 @@ BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 7aef784373d..7274bbf029b 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -11,7 +11,7 @@ //! To add a new built-in testnet, add it to the `define_hardcoded_nets` invocation in the `eth2_config` //! crate. -use enr::{CombinedKey, Enr}; +use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 10d1a8c3259..d30f45ca292 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.5.1-", - fallback = "Lighthouse/v3.5.1" + prefix = "Lighthouse/v4.1.0-", + fallback = "Lighthouse/v4.1.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 1253ef1ecc6..7b8e9ba9a8a 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -160,6 +160,12 @@ where self.map.contains(key) } + /// Shrink the mappings to fit the current size. + pub fn shrink_to_fit(&mut self) { + self.map.shrink_to_fit(); + self.list.shrink_to_fit(); + } + #[cfg(test)] #[track_caller] fn check_invariant(&self) { diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 183f5c9313d..1c8813ca2f2 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -104,12 +104,23 @@ pub trait SlotClock: Send + Sync + Sized + Clone { self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 } - /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts. - fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option { + /// Returns the `Duration` since the start of the current `Slot` at seconds precision. Useful in determining whether to apply proposer boosts. 
+ fn seconds_from_current_slot_start(&self) -> Option { self.now_duration() .and_then(|now| now.checked_sub(self.genesis_duration())) .map(|duration_into_slot| { - Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) + Duration::from_secs(duration_into_slot.as_secs() % self.slot_duration().as_secs()) + }) + } + + /// Returns the `Duration` since the start of the current `Slot` at milliseconds precision. + fn millis_from_current_slot_start(&self) -> Option { + self.now_duration() + .and_then(|now| now.checked_sub(self.genesis_duration())) + .map(|duration_into_slot| { + Duration::from_millis( + (duration_into_slot.as_millis() % self.slot_duration().as_millis()) as u64, + ) }) } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 06c1ca8f58e..2dd041ff07e 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -6,3 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lru_cache = { path = "../lru_cache" } +lazy_static = "1.4.0" +parking_lot = "0.12.0" diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs index 4a8cf17380d..386f08a7390 100644 --- a/common/unused_port/src/lib.rs +++ b/common/unused_port/src/lib.rs @@ -1,4 +1,8 @@ -use std::net::{TcpListener, UdpSocket}; +use lazy_static::lazy_static; +use lru_cache::LRUTimeCache; +use parking_lot::Mutex; +use std::net::{SocketAddr, TcpListener, UdpSocket}; +use std::time::Duration; #[derive(Copy, Clone)] pub enum Transport { @@ -6,14 +10,37 @@ pub enum Transport { Udp, } -/// A convenience function for `unused_port(Transport::Tcp)`. -pub fn unused_tcp_port() -> Result { - unused_port(Transport::Tcp) +#[derive(Copy, Clone)] +pub enum IpVersion { + Ipv4, + Ipv6, +} + +pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300); + +lazy_static! { + static ref FOUND_PORTS_CACHE: Mutex> = + Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL)); +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_tcp4_port() -> Result { + zero_port(Transport::Tcp, IpVersion::Ipv4) +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_udp4_port() -> Result { + zero_port(Transport::Udp, IpVersion::Ipv4) } -/// A convenience function for `unused_port(Transport::Tcp)`. -pub fn unused_udp_port() -> Result { - unused_port(Transport::Udp) +/// A convenience wrapper over [`zero_port`]. +pub fn unused_tcp6_port() -> Result { + zero_port(Transport::Tcp, IpVersion::Ipv6) +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_udp6_port() -> Result { + zero_port(Transport::Udp, IpVersion::Ipv6) } /// A bit of hack to find an unused port. @@ -26,10 +53,29 @@ pub fn unused_udp_port() -> Result { /// It is possible that users are unable to bind to the ports returned by this function as the OS /// has a buffer period where it doesn't allow binding to the same port even after the socket is /// closed. We might have to use SO_REUSEADDR socket option from `std::net2` crate in that case. 
-pub fn unused_port(transport: Transport) -> Result { +pub fn zero_port(transport: Transport, ipv: IpVersion) -> Result { + let localhost = match ipv { + IpVersion::Ipv4 => std::net::Ipv4Addr::LOCALHOST.into(), + IpVersion::Ipv6 => std::net::Ipv6Addr::LOCALHOST.into(), + }; + let socket_addr = std::net::SocketAddr::new(localhost, 0); + let mut unused_port: u16; + loop { + unused_port = find_unused_port(transport, socket_addr)?; + let mut cache_lock = FOUND_PORTS_CACHE.lock(); + if !cache_lock.contains(&unused_port) { + cache_lock.insert(unused_port); + break; + } + } + + Ok(unused_port) +} + +fn find_unused_port(transport: Transport, socket_addr: SocketAddr) -> Result { let local_addr = match transport { Transport::Tcp => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + let listener = TcpListener::bind(socket_addr).map_err(|e| { format!("Failed to create TCP listener to find unused port: {:?}", e) })?; listener.local_addr().map_err(|e| { @@ -40,7 +86,7 @@ pub fn unused_port(transport: Transport) -> Result { })? } Transport::Udp => { - let socket = UdpSocket::bind("127.0.0.1:0") + let socket = UdpSocket::bind(socket_addr) .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; socket.local_addr().map_err(|e| { format!( diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 0eba4cf2327..39a14e28377 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -16,7 +16,7 @@ filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } rand = "0.8.5" deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 0e0ef0707e2..c2856003bfd 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_ssz_types = "0.2.2" -eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.1" -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ssz_types = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz_derive = "0.5.0" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" smallvec = "1.6.1" [dev-dependencies] diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs index edb60f30600..3b4878503ea 100644 --- a/consensus/cached_tree_hash/src/cache.rs +++ b/consensus/cached_tree_hash/src/cache.rs @@ -1,7 +1,7 @@ use crate::cache_arena; use crate::SmallVec8; use crate::{Error, Hash256}; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use smallvec::smallvec; use ssz_derive::{Decode, Encode}; use tree_hash::BYTES_PER_CHUNK; diff --git a/consensus/cached_tree_hash/src/test.rs b/consensus/cached_tree_hash/src/test.rs index 244439ab30a..69b49826bf8 100644 --- a/consensus/cached_tree_hash/src/test.rs +++ b/consensus/cached_tree_hash/src/test.rs @@ -1,6 +1,6 @@ use crate::impls::hash256_iter; use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use eth2_hashing::ZERO_HASHES; +use ethereum_hashing::ZERO_HASHES; use quickcheck_macros::quickcheck; use ssz_types::{ typenum::{Unsigned, U16, U255, U256, U257}, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index f0381e5ad99..3864d52d47c 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ 
-10,8 +10,8 @@ edition = "2021" types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 916b1d5582b..e6c46e83e78 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,7 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, - ProtoArrayForkChoice, ReOrgThreshold, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; @@ -187,51 +187,6 @@ impl CountUnrealized { pub fn is_true(&self) -> bool { matches!(self, CountUnrealized::True) } - - pub fn and(&self, other: CountUnrealized) -> CountUnrealized { - if self.is_true() && other.is_true() { - CountUnrealized::True - } else { - CountUnrealized::False - } - } -} - -impl From for CountUnrealized { - fn from(count_unrealized: bool) -> Self { - if count_unrealized { - CountUnrealized::True - } else { - CountUnrealized::False - } - } -} - -#[derive(Copy, Clone)] -enum UpdateJustifiedCheckpointSlots { - OnTick { - current_slot: Slot, - }, - OnBlock { - state_slot: Slot, - current_slot: Slot, - }, -} - -impl UpdateJustifiedCheckpointSlots { - fn current_slot(&self) -> Slot { - match self { - UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot, - UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot, - } - } - - fn state_slot(&self) -> Option { - match self { - UpdateJustifiedCheckpointSlots::OnTick { .. } => None, - UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. } => Some(*state_slot), - } - } } /// Indicates if a block has been verified by an execution payload. @@ -393,7 +348,6 @@ where anchor_block: &SignedBeaconBlock, anchor_state: &BeaconState, current_slot: Option, - count_unrealized_full_config: CountUnrealizedFull, spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. @@ -440,7 +394,6 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, - count_unrealized_full_config, )?; let mut fork_choice = Self { @@ -533,7 +486,7 @@ where // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. - let current_slot = self.update_time(system_time_current_slot, spec)?; + let current_slot = self.update_time(system_time_current_slot)?; let store = &mut self.fc_store; @@ -580,6 +533,7 @@ where current_slot: Slot, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { // Ensure that fork choice has already been updated for the current slot. 
This prevents @@ -611,6 +565,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -620,6 +575,7 @@ where &self, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { let current_slot = self.fc_store.get_current_slot(); @@ -629,6 +585,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -654,58 +611,6 @@ where } } - /// Returns `true` if the given `store` should be updated to set - /// `state.current_justified_checkpoint` its `justified_checkpoint`. - /// - /// ## Specification - /// - /// Is equivalent to: - /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint - fn should_update_justified_checkpoint( - &mut self, - new_justified_checkpoint: Checkpoint, - slots: UpdateJustifiedCheckpointSlots, - spec: &ChainSpec, - ) -> Result> { - self.update_time(slots.current_slot(), spec)?; - - if compute_slots_since_epoch_start::(self.fc_store.get_current_slot()) - < spec.safe_slots_to_update_justified - { - return Ok(true); - } - - let justified_slot = - compute_start_slot_at_epoch::(self.fc_store.justified_checkpoint().epoch); - - // This sanity check is not in the spec, but the invariant is implied. - if let Some(state_slot) = slots.state_slot() { - if justified_slot >= state_slot { - return Err(Error::AttemptToRevertJustification { - store: justified_slot, - state: state_slot, - }); - } - } - - // We know that the slot for `new_justified_checkpoint.root` is not greater than - // `state.slot`, since a state cannot justify its own slot. - // - // We know that `new_justified_checkpoint.root` is an ancestor of `state`, since a `state` - // only ever justifies ancestors. - // - // A prior `if` statement protects against a justified_slot that is greater than - // `state.slot` - let justified_ancestor = - self.get_ancestor(new_justified_checkpoint.root, justified_slot)?; - if justified_ancestor != Some(self.fc_store.justified_checkpoint().root) { - return Ok(false); - } - - Ok(true) - } - /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. pub fn on_valid_execution_payload( &mut self, @@ -759,7 +664,7 @@ where // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. - let current_slot = self.update_time(system_time_current_slot, spec)?; + let current_slot = self.update_time(system_time_current_slot)?; // Parent block must be known. let parent_block = self @@ -814,17 +719,10 @@ where self.fc_store.set_proposer_boost_root(block_root); } - let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock { - state_slot: state.slot(), - current_slot, - }; - // Update store with checkpoints if necessary self.update_checkpoints( state.current_justified_checkpoint(), state.finalized_checkpoint(), - update_justified_checkpoint_slots, - spec, )?; // Update unrealized justified/finalized checkpoints. 
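Because `best_justified_checkpoint` and `should_update_justified_checkpoint` are removed in this change, the checkpoint update logic collapses into plain monotonic epoch comparisons, as the replacement `update_checkpoints` in the next hunk shows. A condensed sketch of the resulting rule, using simplified stand-in types rather than the real `ForkChoiceStore` trait:

```rust
#[derive(Clone, Copy)]
struct Checkpoint {
    epoch: u64,
    root: [u8; 32],
}

struct Store {
    justified: Checkpoint,
    finalized: Checkpoint,
}

impl Store {
    /// Sketch of the simplified rule: each checkpoint advances whenever
    /// a strictly higher epoch is observed; there is no longer any
    /// `safe_slots_to_update_justified` gating or staged
    /// "best justified" checkpoint.
    fn update_checkpoints(&mut self, justified: Checkpoint, finalized: Checkpoint) {
        if justified.epoch > self.justified.epoch {
            self.justified = justified;
        }
        if finalized.epoch > self.finalized.epoch {
            self.finalized = finalized;
        }
    }
}

fn main() {
    let genesis = Checkpoint { epoch: 0, root: [0; 32] };
    let mut store = Store { justified: genesis, finalized: genesis };
    store.update_checkpoints(Checkpoint { epoch: 2, root: [1; 32] }, genesis);
    assert_eq!(store.justified.epoch, 2); // justified advanced
    assert_eq!(store.finalized.epoch, 0); // finalized unchanged
}
```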
@@ -905,11 +803,9 @@ where // If block is from past epochs, try to update store's justified & finalized checkpoints right away if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { - self.update_checkpoints( + self.pull_up_store_checkpoints( unrealized_justified_checkpoint, unrealized_finalized_checkpoint, - update_justified_checkpoint_slots, - spec, )?; } @@ -1004,29 +900,19 @@ where &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, - slots: UpdateJustifiedCheckpointSlots, - spec: &ChainSpec, ) -> Result<(), Error> { // Update justified checkpoint. if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { - if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch { - self.fc_store - .set_best_justified_checkpoint(justified_checkpoint); - } - if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? { - self.fc_store - .set_justified_checkpoint(justified_checkpoint) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; } // Update finalized checkpoint. if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { self.fc_store.set_finalized_checkpoint(finalized_checkpoint); - self.fc_store - .set_justified_checkpoint(justified_checkpoint) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; } + Ok(()) } @@ -1167,9 +1053,8 @@ where system_time_current_slot: Slot, attestation: &IndexedAttestation, is_from_block: AttestationFromBlock, - spec: &ChainSpec, ) -> Result<(), Error> { - self.update_time(system_time_current_slot, spec)?; + self.update_time(system_time_current_slot)?; // Ignore any attestations to the zero hash. // @@ -1230,16 +1115,12 @@ where /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. - pub fn update_time( - &mut self, - current_slot: Slot, - spec: &ChainSpec, - ) -> Result> { + pub fn update_time(&mut self, current_slot: Slot) -> Result> { while self.fc_store.get_current_slot() < current_slot { let previous_slot = self.fc_store.get_current_slot(); // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // get stuck in a loop. - self.on_tick(previous_slot + 1, spec)? + self.on_tick(previous_slot + 1)? } // Process any attestations that might now be eligible. @@ -1255,7 +1136,7 @@ where /// Equivalent to: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick - fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error> { + fn on_tick(&mut self, time: Slot) -> Result<(), Error> { let store = &mut self.fc_store; let previous_slot = store.get_current_slot(); @@ -1283,28 +1164,29 @@ where return Ok(()); } - if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { - let store = &self.fc_store; - if self.is_finalized_checkpoint_or_descendant(store.best_justified_checkpoint().root) { - let store = &mut self.fc_store; - store - .set_justified_checkpoint(*store.best_justified_checkpoint()) - .map_err(Error::ForkChoiceStoreError)?; - } - } - - // Update store.justified_checkpoint if a better unrealized justified checkpoint is known + // Update the justified/finalized checkpoints based upon the + // best-observed unrealized justification/finality. 
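`update_checkpoints` is now a pure monotonic update: each checkpoint only advances to a strictly greater epoch, with no `best_justified` staging. A toy version of the simplified logic (local types, not Lighthouse's `Checkpoint`):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Checkpoint {
    epoch: u64,
}

struct Store {
    justified: Checkpoint,
    finalized: Checkpoint,
}

impl Store {
    // Mirrors the simplified logic above: advance each checkpoint only when
    // the candidate has a strictly greater epoch.
    fn update_checkpoints(&mut self, justified: Checkpoint, finalized: Checkpoint) {
        if justified.epoch > self.justified.epoch {
            self.justified = justified;
        }
        if finalized.epoch > self.finalized.epoch {
            self.finalized = finalized;
        }
    }
}

fn main() {
    let mut store = Store {
        justified: Checkpoint { epoch: 2 },
        finalized: Checkpoint { epoch: 1 },
    };
    store.update_checkpoints(Checkpoint { epoch: 3 }, Checkpoint { epoch: 1 });
    assert_eq!(store.justified.epoch, 3);
    assert_eq!(store.finalized.epoch, 1); // unchanged: not strictly greater
}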
let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint(); let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint(); - self.update_checkpoints( + self.pull_up_store_checkpoints( unrealized_justified_checkpoint, unrealized_finalized_checkpoint, - UpdateJustifiedCheckpointSlots::OnTick { current_slot }, - spec, )?; + Ok(()) } + fn pull_up_store_checkpoints( + &mut self, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, + ) -> Result<(), Error> { + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + } + /// Processes and removes from the queue any queued attestations which may now be eligible for /// processing due to the slot clock incrementing. fn process_attestation_queue(&mut self) -> Result<(), Error> { @@ -1468,16 +1350,6 @@ where *self.fc_store.justified_checkpoint() } - /// Return the best justified checkpoint. - /// - /// ## Warning - /// - /// This is distinct to the "justified checkpoint" or the "current justified checkpoint". This - /// "best justified checkpoint" value should only be used internally or for testing. - pub fn best_justified_checkpoint(&self) -> Checkpoint { - *self.fc_store.best_justified_checkpoint() - } - pub fn unrealized_justified_checkpoint(&self) -> Checkpoint { *self.fc_store.unrealized_justified_checkpoint() } @@ -1538,13 +1410,11 @@ where pub fn proto_array_from_persisted( persisted: &PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result> { - let mut proto_array = - ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) - .map_err(Error::InvalidProtoArrayBytes)?; + let mut proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) + .map_err(Error::InvalidProtoArrayBytes)?; let contains_invalid_payloads = proto_array.contains_invalid_payloads(); debug!( @@ -1575,7 +1445,7 @@ where "error" => e, "info" => "please report this error", ); - ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) .map_err(Error::InvalidProtoArrayBytes) } else { debug!( @@ -1592,17 +1462,11 @@ where persisted: PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, fc_store: T, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result> { - let proto_array = Self::proto_array_from_persisted( - &persisted, - reset_payload_statuses, - count_unrealized_full, - spec, - log, - )?; + let proto_array = + Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec, log)?; let current_slot = fc_store.get_current_slot(); diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 9500b1c7da8..320f10141d9 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -47,9 +47,6 @@ pub trait ForkChoiceStore: Sized { /// Returns balances from the `state` identified by `justified_checkpoint.root`. fn justified_balances(&self) -> &JustifiedBalances; - /// Returns the `best_justified_checkpoint`. - fn best_justified_checkpoint(&self) -> &Checkpoint; - /// Returns the `finalized_checkpoint`. fn finalized_checkpoint(&self) -> &Checkpoint; @@ -68,9 +65,6 @@ pub trait ForkChoiceStore: Sized { /// Sets the `justified_checkpoint`. 
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Self::Error>; - /// Sets the `best_justified_checkpoint`. - fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); - /// Sets the `unrealized_justified_checkpoint`. fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint); diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index b307c66d885..397a2ff8930 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -7,6 +7,4 @@ pub use crate::fork_choice::{ PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, InvalidationOperation, -}; +pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 00bd1f763dc..82bf642f180 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -104,16 +104,6 @@ impl ForkChoiceTest { self } - /// Assert the epochs match. - pub fn assert_best_justified_epoch(self, epoch: u64) -> Self { - assert_eq!( - self.get(|fc_store| fc_store.best_justified_checkpoint().epoch), - Epoch::new(epoch), - "best_justified_epoch" - ); - self - } - /// Assert the given slot is greater than the head slot. pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self { assert!(self.harness.finalized_checkpoint().epoch < epoch); @@ -151,7 +141,7 @@ impl ForkChoiceTest { .chain .canonical_head .fork_choice_write_lock() - .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) + .update_time(self.harness.chain.slot().unwrap()) .unwrap(); func( self.harness @@ -241,6 +231,11 @@ impl ForkChoiceTest { /// /// If the chain is presently in an unsafe period, transition through it and the following safe /// period. + /// + /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed + /// from the fork choice spec in Q1 2023. We're still leaving references to + /// it in our tests because (a) it's easier and (b) it allows us to easily + /// test for the absence of that parameter. pub fn move_to_next_unsafe_period(self) -> Self { self.move_inside_safe_to_update() .move_outside_safe_to_update() @@ -534,7 +529,6 @@ async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) - .assert_best_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); @@ -551,11 +545,9 @@ async fn justified_checkpoint_updates_first_justification_outside_safe_to_update .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) - .assert_best_justified_epoch(0) .apply_blocks(1) .await - .assert_justified_epoch(2) - .assert_best_justified_epoch(2); + .assert_justified_epoch(2); } /// - The new justified checkpoint **does not** descend from the current. @@ -583,8 +575,7 @@ async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_with .unwrap(); }) .await - .assert_justified_epoch(3) - .assert_best_justified_epoch(3); + .assert_justified_epoch(3); } /// - The new justified checkpoint **does not** descend from the current. 
@@ -612,8 +603,9 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - .assert_justified_epoch(2) - .assert_best_justified_epoch(3); + // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new + // block should have updated the justified checkpoint. + .assert_justified_epoch(3); } /// - The new justified checkpoint **does not** descend from the current. @@ -641,8 +633,7 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - .assert_justified_epoch(3) - .assert_best_justified_epoch(3); + .assert_justified_epoch(3); } /// Check that the balances are obtained correctly. diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2c0dbf1a758..2b883f8646e 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 887deb1efd6..dc3de71cefd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,4 +1,4 @@ -use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; use safe_arith::ArithError; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 205ef8f5210..cd43c566f00 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -10,8 +10,8 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index c55739da792..1fe45fd0f10 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -50,6 +50,7 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + InvalidEpochOffset(u64), Arith(ArithError), } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 68b3fb71981..157f072ad37 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -3,7 +3,6 @@ mod ffg_updates; mod no_votes; mod votes; -use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use serde_derive::{Deserialize, Serialize}; @@ -88,7 +87,6 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), - CountUnrealizedFull::default(), ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); @@ -307,8 +305,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { let bytes = original.as_bytes(); - let decoded = ProtoArrayForkChoice::from_bytes(&bytes, CountUnrealizedFull::default()) - .expect("fork choice should decode from bytes"); + let decoded = + ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes"); assert!( 
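`check_bytes_round_trip` now exercises the single-argument `from_bytes`. The invariant it asserts, in miniature, with a hand-rolled encoding standing in for SSZ:

#[derive(Debug, Clone, PartialEq)]
struct Toy {
    prune_threshold: u64,
}

impl Toy {
    fn as_bytes(&self) -> Vec<u8> {
        self.prune_threshold.to_le_bytes().to_vec()
    }

    // One argument, like the new `ProtoArrayForkChoice::from_bytes`.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        let arr: [u8; 8] = bytes
            .try_into()
            .map_err(|_| format!("expected 8 bytes, got {}", bytes.len()))?;
        Ok(Toy {
            prune_threshold: u64::from_le_bytes(arr),
        })
    }
}

fn main() {
    let original = Toy { prune_threshold: 256 };
    let decoded = Toy::from_bytes(&original.as_bytes()).expect("should decode");
    assert_eq!(original, decoded, "should encode and decode without change");
}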
*original == decoded, "fork choice should encode and decode without change" diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs index 75f6c2f7c80..c8787817f1a 100644 --- a/consensus/proto_array/src/justified_balances.rs +++ b/consensus/proto_array/src/justified_balances.rs @@ -24,7 +24,7 @@ impl JustifiedBalances { .validators() .iter() .map(|validator| { - if validator.is_active_at(current_epoch) { + if !validator.slashed && validator.is_active_at(current_epoch) { total_effective_balance.safe_add_assign(validator.effective_balance)?; num_active_validators.safe_add_assign(1)?; diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index f2b29e1c7b2..481daba47e4 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -6,12 +6,10 @@ mod proto_array_fork_choice; mod ssz_container; pub use crate::justified_balances::JustifiedBalances; -pub use crate::proto_array::{ - calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, -}; +pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation}; pub use crate::proto_array_fork_choice::{ - Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, - ReOrgThreshold, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index bf50c080261..2c19206cb75 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -118,24 +118,6 @@ impl Default for ProposerBoost { } } -/// Indicate whether we should strictly count unrealized justification/finalization votes. -#[derive(Default, PartialEq, Eq, Debug, Serialize, Deserialize, Copy, Clone)] -pub enum CountUnrealizedFull { - True, - #[default] - False, -} - -impl From for CountUnrealizedFull { - fn from(b: bool) -> Self { - if b { - CountUnrealizedFull::True - } else { - CountUnrealizedFull::False - } - } -} - #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. 
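The `justified_balances` fix above excludes slashed validators from the justified total. The predicate, distilled (field names are stand-ins for the real `Validator` type):

struct Validator {
    slashed: bool,
    active: bool,
    effective_balance: u64,
}

// Mirrors the fixed predicate: a validator only counts if it is active at the
// current epoch *and* has not been slashed.
fn total_justified_balance(validators: &[Validator]) -> u64 {
    validators
        .iter()
        .filter(|v| !v.slashed && v.active)
        .map(|v| v.effective_balance)
        .sum()
}

fn main() {
    let validators = [
        Validator { slashed: false, active: true, effective_balance: 32 },
        Validator { slashed: true, active: true, effective_balance: 32 }, // now excluded
        Validator { slashed: false, active: false, effective_balance: 32 },
    ];
    assert_eq!(total_justified_balance(&validators), 32);
}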
Small prunes @@ -146,7 +128,6 @@ pub struct ProtoArray { pub nodes: Vec, pub indices: HashMap, pub previous_proposer_boost: ProposerBoost, - pub count_unrealized_full: CountUnrealizedFull, } impl ProtoArray { @@ -684,9 +665,9 @@ impl ProtoArray { start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, finalized_checkpoint: self.finalized_checkpoint, - head_root: justified_node.root, - head_justified_checkpoint: justified_node.justified_checkpoint, - head_finalized_checkpoint: justified_node.finalized_checkpoint, + head_root: best_node.root, + head_justified_checkpoint: best_node.justified_checkpoint, + head_finalized_checkpoint: best_node.finalized_checkpoint, }))); } @@ -900,55 +881,44 @@ impl ProtoArray { } let genesis_epoch = Epoch::new(0); - - let checkpoint_match_predicate = - |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { - let correct_justified = node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == genesis_epoch; - let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == genesis_epoch; - correct_justified && correct_finalized + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let node_epoch = node.slot.epoch(E::slots_per_epoch()); + let node_justified_checkpoint = + if let Some(justified_checkpoint) = node.justified_checkpoint { + justified_checkpoint + } else { + // The node does not have any information about the justified + // checkpoint. This indicates an inconsistent proto-array. + return false; }; - if let ( - Some(unrealized_justified_checkpoint), - Some(unrealized_finalized_checkpoint), - Some(justified_checkpoint), - Some(finalized_checkpoint), - ) = ( - node.unrealized_justified_checkpoint, - node.unrealized_finalized_checkpoint, - node.justified_checkpoint, - node.finalized_checkpoint, - ) { - let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let voting_source = if current_epoch > node_epoch { + // The block is from a prior epoch, the voting source will be pulled-up. + node.unrealized_justified_checkpoint + // Sometimes we don't track the unrealized justification. In + // that case, just use the fully-realized justified checkpoint. + .unwrap_or(node_justified_checkpoint) + } else { + // The block is not from a prior epoch, therefore the voting source + // is not pulled up. 
+ node_justified_checkpoint + }; - // If previous epoch is justified, pull up all tips to at least the previous epoch - if CountUnrealizedFull::True == self.count_unrealized_full - && (current_epoch > genesis_epoch - && self.justified_checkpoint.epoch + 1 == current_epoch) - { - unrealized_justified_checkpoint.epoch + 1 >= current_epoch - // If previous epoch is not justified, pull up only tips from past epochs up to the current epoch - } else { - // If block is from a previous epoch, filter using unrealized justification & finalization information - if node.slot.epoch(E::slots_per_epoch()) < current_epoch { - checkpoint_match_predicate( - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - ) - // If block is from the current epoch, filter using the head state's justification & finalization information - } else { - checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) - } + let mut correct_justified = self.justified_checkpoint.epoch == genesis_epoch + || voting_source.epoch == self.justified_checkpoint.epoch; + + if let Some(node_unrealized_justified_checkpoint) = node.unrealized_justified_checkpoint { + if !correct_justified && self.justified_checkpoint.epoch + 1 == current_epoch { + correct_justified = node_unrealized_justified_checkpoint.epoch + >= self.justified_checkpoint.epoch + && voting_source.epoch + 2 >= current_epoch; } - } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = - (node.justified_checkpoint, node.finalized_checkpoint) - { - checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) - } else { - false } + + let correct_finalized = self.finalized_checkpoint.epoch == genesis_epoch + || self.is_finalized_checkpoint_or_descendant::(node.root); + + correct_justified && correct_finalized } /// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`. diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 0e0d806e76e..d376e62e8f6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,8 +1,8 @@ use crate::{ error::Error, proto_array::{ - calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, Iter, - ProposerBoost, ProtoArray, ProtoNode, + calculate_committee_fraction, InvalidationOperation, Iter, ProposerBoost, ProtoArray, + ProtoNode, }, ssz_container::SszContainer, JustifiedBalances, @@ -10,7 +10,10 @@ use crate::{ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::{BTreeSet, HashMap}; +use std::{ + collections::{BTreeSet, HashMap}, + fmt, +}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -125,6 +128,17 @@ impl ExecutionStatus { } } +impl fmt::Display for ExecutionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExecutionStatus::Valid(_) => write!(f, "valid"), + ExecutionStatus::Invalid(_) => write!(f, "invalid"), + ExecutionStatus::Optimistic(_) => write!(f, "optimistic"), + ExecutionStatus::Irrelevant(_) => write!(f, "irrelevant"), + } + } +} + /// A block that is to be applied to the fork choice. /// /// A simplified version of `types::BeaconBlock`. 
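The rewritten viability filter hinges on a "voting source": blocks from prior epochs vote with their pulled-up (unrealized) justification, current-epoch blocks with their realized one. An epoch-only distillation of the rule above, with the finalization half elided:

fn is_viable(
    store_justified_epoch: u64,
    current_epoch: u64,
    node_epoch: u64,
    node_justified_epoch: u64,
    node_unrealized_justified_epoch: Option<u64>,
) -> bool {
    // Blocks from prior epochs have been pulled up; fall back to the realized
    // checkpoint when unrealized justification isn't tracked.
    let voting_source = if current_epoch > node_epoch {
        node_unrealized_justified_epoch.unwrap_or(node_justified_epoch)
    } else {
        node_justified_epoch
    };

    let mut correct_justified =
        store_justified_epoch == 0 || voting_source == store_justified_epoch;

    // Leniency when the store justified the previous epoch: accept nodes whose
    // unrealized justification has caught up and whose voting source is recent.
    if let Some(unrealized) = node_unrealized_justified_epoch {
        if !correct_justified && store_justified_epoch + 1 == current_epoch {
            correct_justified =
                unrealized >= store_justified_epoch && voting_source + 2 >= current_epoch;
        }
    }

    correct_justified
}

fn main() {
    // Current-epoch block whose realized justification matches the store.
    assert!(is_viable(4, 5, 5, 4, None));
    // Stale voting source with no leniency window: filtered out.
    assert!(!is_viable(4, 7, 7, 2, Some(2)));
}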
@@ -236,6 +250,9 @@ pub enum DoNotReOrg { ParentDistance, HeadDistance, ShufflingUnstable, + DisallowedOffset { + offset: u64, + }, JustificationAndFinalizationNotCompetitive, ChainNotFinalizing { epochs_since_finalization: u64, @@ -257,6 +274,9 @@ impl std::fmt::Display for DoNotReOrg { Self::ParentDistance => write!(f, "parent too far from head"), Self::HeadDistance => write!(f, "head too far from current slot"), Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"), + Self::DisallowedOffset { offset } => { + write!(f, "re-orgs disabled at offset {offset}") + } Self::JustificationAndFinalizationNotCompetitive => { write!(f, "justification or finalization not competitive") } @@ -290,6 +310,31 @@ impl std::fmt::Display for DoNotReOrg { #[serde(transparent)] pub struct ReOrgThreshold(pub u64); +/// New-type for disallowed re-org slots. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DisallowedReOrgOffsets { + // Vecs are faster than hashmaps for small numbers of items. + offsets: Vec, +} + +impl Default for DisallowedReOrgOffsets { + fn default() -> Self { + DisallowedReOrgOffsets { offsets: vec![0] } + } +} + +impl DisallowedReOrgOffsets { + pub fn new(offsets: Vec) -> Result { + for &offset in &offsets { + if offset >= E::slots_per_epoch() { + return Err(Error::InvalidEpochOffset(offset)); + } + } + Ok(Self { offsets }) + } +} + #[derive(PartialEq)] pub struct ProtoArrayForkChoice { pub(crate) proto_array: ProtoArray, @@ -307,7 +352,6 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, - count_unrealized_full: CountUnrealizedFull, ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -316,7 +360,6 @@ impl ProtoArrayForkChoice { nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), - count_unrealized_full, }; let block = Block { @@ -448,6 +491,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let info = self.get_proposer_head_info::( @@ -455,6 +499,7 @@ impl ProtoArrayForkChoice { canonical_head, justified_balances, re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, )?; @@ -489,6 +534,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let mut nodes = self @@ -533,6 +579,12 @@ impl ProtoArrayForkChoice { return Err(DoNotReOrg::ShufflingUnstable.into()); } + // Check allowed slot offsets. + let offset = (re_org_block_slot % E::slots_per_epoch()).as_u64(); + if disallowed_offsets.offsets.contains(&offset) { + return Err(DoNotReOrg::DisallowedOffset { offset }.into()); + } + // Check FFG. 
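`DisallowedReOrgOffsets` validates its offsets at construction (each must be less than the epoch length, else `InvalidEpochOffset`) and is then consulted with a modular check during proposer re-org decisions. Both halves in isolation, with a hard-coded epoch length for illustration:

const SLOTS_PER_EPOCH: u64 = 32;

struct DisallowedReOrgOffsets {
    offsets: Vec<u64>,
}

impl DisallowedReOrgOffsets {
    fn new(offsets: Vec<u64>) -> Result<Self, String> {
        // Mirrors `Error::InvalidEpochOffset`: offsets are positions within an epoch.
        for &offset in &offsets {
            if offset >= SLOTS_PER_EPOCH {
                return Err(format!("invalid epoch offset: {offset}"));
            }
        }
        Ok(Self { offsets })
    }

    fn disallows(&self, block_slot: u64) -> bool {
        self.offsets.contains(&(block_slot % SLOTS_PER_EPOCH))
    }
}

fn main() {
    // Default behaviour: never attempt to re-org the first slot of an epoch.
    let offsets = DisallowedReOrgOffsets::new(vec![0]).unwrap();
    assert!(offsets.disallows(64)); // slot 0 of epoch 2
    assert!(!offsets.disallows(65));
    assert!(DisallowedReOrgOffsets::new(vec![32]).is_err());
}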
let ffg_competitive = parent_node.unrealized_justified_checkpoint == head_node.unrealized_justified_checkpoint @@ -780,13 +832,10 @@ impl ProtoArrayForkChoice { SszContainer::from(self).as_ssz_bytes() } - pub fn from_bytes( - bytes: &[u8], - count_unrealized_full: CountUnrealizedFull, - ) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let container = SszContainer::from_ssz_bytes(bytes) .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))?; - (container, count_unrealized_full) + container .try_into() .map_err(|e| format!("Failed to initialize ProtoArrayForkChoice: {e:?}")) } @@ -950,7 +999,6 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, - CountUnrealizedFull::default(), ) .unwrap(); @@ -1076,7 +1124,6 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, - CountUnrealizedFull::default(), ) .unwrap(); diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1a20ef967ad..ed1efaae1af 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, + proto_array::{ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, Error, JustifiedBalances, }; @@ -43,12 +43,10 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { +impl TryFrom for ProtoArrayForkChoice { type Error = Error; - fn try_from( - (from, count_unrealized_full): (SszContainer, CountUnrealizedFull), - ) -> Result { + fn try_from(from: SszContainer) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -56,7 +54,6 @@ impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { nodes: from.nodes, indices: from.indices.into_iter().collect::>(), previous_proposer_boost: from.previous_proposer_boost, - count_unrealized_full, }; Ok(Self { diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml deleted file mode 100644 index d4ba02765fb..00000000000 --- a/consensus/serde_utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "eth2_serde_utils" -version = "0.1.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Serialization and deserialization utilities useful for JSON representations of Ethereum 2.0 types." -license = "Apache-2.0" - -[dependencies] -serde = { version = "1.0.116", features = ["derive"] } -serde_derive = "1.0.116" -serde_json = "1.0.58" -hex = "0.4.2" -ethereum-types = "0.14.1" diff --git a/consensus/serde_utils/src/fixed_bytes_hex.rs b/consensus/serde_utils/src/fixed_bytes_hex.rs deleted file mode 100644 index 4e9dc98aca8..00000000000 --- a/consensus/serde_utils/src/fixed_bytes_hex.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Formats `[u8; n]` as a 0x-prefixed hex string. -//! -//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -macro_rules! 
bytes_hex { - ($num_bytes: tt) => { - use super::*; - - const BYTES_LEN: usize = $num_bytes; - - pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result - where - S: Serializer, - { - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> - where - D: Deserializer<'de>, - { - let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; - - if decoded.len() != BYTES_LEN { - return Err(D::Error::custom(format!( - "expected {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array.copy_from_slice(&decoded); - Ok(array) - } - }; -} - -pub mod bytes_4_hex { - bytes_hex!(4); -} - -pub mod bytes_8_hex { - bytes_hex!(8); -} diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs deleted file mode 100644 index 9a2cd65c764..00000000000 --- a/consensus/serde_utils/src/hex.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Provides utilities for parsing 0x-prefixed hex strings. - -use serde::de::{self, Visitor}; -use std::fmt; - -/// Encode `data` as a 0x-prefixed hex string. -pub fn encode>(data: T) -> String { - let hex = hex::encode(data); - - let mut s = "0x".to_string(); - s.push_str(hex.as_str()); - s -} - -/// Decode `data` from a 0x-prefixed hex string. -pub fn decode(s: &str) -> Result, String> { - if let Some(stripped) = s.strip_prefix("0x") { - hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e)) - } else { - Err("hex must have 0x prefix".to_string()) - } -} - -pub struct PrefixedHexVisitor; - -impl<'de> Visitor<'de> for PrefixedHexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string with 0x prefix") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - decode(value).map_err(de::Error::custom) - } -} - -pub struct HexVisitor; - -impl<'de> Visitor<'de> for HexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string (irrelevant of prefix)") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - hex::decode(value.trim_start_matches("0x")) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn encoding() { - let bytes = vec![0, 255]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x00ff"); - - let bytes = vec![]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x"); - - let bytes = vec![1, 2, 3]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x010203"); - } -} diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs deleted file mode 100644 index f7f4833628c..00000000000 --- a/consensus/serde_utils/src/hex_vec.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Formats `Vec` as a 0x-prefixed hex string. -//! -//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`. 
- -use crate::hex::PrefixedHexVisitor; -use serde::{Deserializer, Serializer}; - -pub fn serialize(bytes: &[u8], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_str(PrefixedHexVisitor) -} diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs deleted file mode 100644 index b9a1813915a..00000000000 --- a/consensus/serde_utils/src/json_str.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Serialize a datatype as a JSON-blob within a single string. -use serde::{ - de::{DeserializeOwned, Error as _}, - ser::Error as _, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/// Serialize as a JSON object within a string. -pub fn serialize(value: &T, serializer: S) -> Result -where - S: Serializer, - T: Serialize, -{ - serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?) -} - -/// Deserialize a JSON object embedded in a string. -pub fn deserialize<'de, T, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: DeserializeOwned, -{ - let json_str = String::deserialize(deserializer)?; - serde_json::from_str(&json_str).map_err(D::Error::custom) -} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs deleted file mode 100644 index 5c5dafc6656..00000000000 --- a/consensus/serde_utils/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod quoted_int; - -pub mod fixed_bytes_hex; -pub mod hex; -pub mod hex_vec; -pub mod json_str; -pub mod list_of_bytes_lists; -pub mod quoted_u64_vec; -pub mod u256_hex_be; -pub mod u32_hex; -pub mod u64_hex_be; -pub mod u8_hex; - -pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/list_of_bytes_lists.rs b/consensus/serde_utils/src/list_of_bytes_lists.rs deleted file mode 100644 index b93321aa06b..00000000000 --- a/consensus/serde_utils/src/list_of_bytes_lists.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. - -use crate::hex; -use serde::ser::SerializeSeq; -use serde::{de, Deserializer, Serializer}; - -pub struct ListOfBytesListVisitor; -impl<'a> serde::de::Visitor<'a> for ListOfBytesListVisitor { - type Value = Vec>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed byte lists") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element::()? 
{ - vec.push(hex::decode(&val).map_err(de::Error::custom)?); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[Vec], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for val in value { - seq.serialize_element(&hex::encode(val))?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(ListOfBytesListVisitor) -} diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs deleted file mode 100644 index 0cc35aa318c..00000000000 --- a/consensus/serde_utils/src/quoted_int.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! Formats some integer types using quotes. -//! -//! E.g., `1` serializes as `"1"`. -//! -//! Quotes can be optional during decoding. - -use ethereum_types::U256; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::convert::TryFrom; -use std::marker::PhantomData; - -macro_rules! define_mod { - ($int: ty) => { - /// Serde support for deserializing quoted integers. - /// - /// Configurable so that quotes are either required or optional. - pub struct QuotedIntVisitor { - require_quotes: bool, - _phantom: PhantomData, - } - - impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - s.parse::<$int>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64(self, v: u64) -> Result - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) - } - } - } - - /// Compositional wrapper type that allows quotes or no quotes. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct MaybeQuoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "self")] - pub value: T, - } - - /// Wrapper type for requiring quotes on a `$int`-like type. - /// - /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested - /// inside types like `Option`, `Result` and `Vec`. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct Quoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "require_quotes")] - pub value: T, - } - - /// Serialize with quotes. - pub fn serialize(value: &T, serializer: S) -> Result - where - S: Serializer, - T: From<$int> + Into<$int> + Copy, - { - let v: $int = (*value).into(); - serializer.serialize_str(&format!("{}", v)) - } - - /// Deserialize with or without quotes. - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) - } - - /// Requires quotes when deserializing. - /// - /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
- pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } - } - - #[cfg(test)] - mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::>("8").unwrap_err(); - } - } - }; -} - -pub mod quoted_u8 { - use super::*; - - define_mod!(u8); -} - -pub mod quoted_u32 { - use super::*; - - define_mod!(u32); -} - -pub mod quoted_u64 { - use super::*; - - define_mod!(u64); -} - -pub mod quoted_i64 { - use super::*; - - define_mod!(i64); -} - -pub mod quoted_u256 { - use super::*; - - struct U256Visitor; - - impl<'de> serde::de::Visitor<'de> for U256Visitor { - type Value = U256; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a quoted U256 integer") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - U256::from_dec_str(v).map_err(serde::de::Error::custom) - } - } - - /// Serialize with quotes. - pub fn serialize(value: &U256, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&format!("{}", value)) - } - - /// Deserialize with quotes. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(U256Visitor) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedU256(#[serde(with = "quoted_u256")] U256); - - #[test] - fn u256_with_quotes() { - assert_eq!( - &serde_json::to_string(&WrappedU256(U256::one())).unwrap(), - "\"1\"" - ); - assert_eq!( - serde_json::from_str::("\"1\"").unwrap(), - WrappedU256(U256::one()) - ); - } - - #[test] - fn u256_without_quotes() { - serde_json::from_str::("1").unwrap_err(); - } - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedI64(#[serde(with = "quoted_i64")] i64); - - #[test] - fn negative_i64_with_quotes() { - assert_eq!( - serde_json::from_str::("\"-200\"").unwrap().0, - -200 - ); - assert_eq!( - serde_json::to_string(&WrappedI64(-12_500)).unwrap(), - "\"-12500\"" - ); - } - - // It would be OK if this worked, but we don't need it to (i64s should always be quoted). - #[test] - fn negative_i64_without_quotes() { - serde_json::from_str::("-200").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs deleted file mode 100644 index f124c989092..00000000000 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. 
- -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -#[serde(transparent)] -pub struct QuotedIntWrapper { - #[serde(with = "crate::quoted_u64")] - pub int: u64, -} - -pub struct QuotedIntVecVisitor; -impl<'a> serde::de::Visitor<'a> for QuotedIntVecVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - vec.push(val.int); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(QuotedIntVecVisitor) -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::quoted_u64_vec")] - values: Vec, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs deleted file mode 100644 index 8007e5792c3..00000000000 --- a/consensus/serde_utils/src/u256_hex_be.rs +++ /dev/null @@ -1,144 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &U256, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - U256::from_str(&decoded).map_err(|e| 
de::Error::custom(format!("Invalid U256 string: {}", e))) -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: U256, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - 1 - }) - .unwrap(), - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024.into() }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - 1 - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs deleted file mode 100644 index c1ab3537b2a..00000000000 --- a/consensus/serde_utils/src/u32_hex.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Formats `u32` as a 0x-prefixed, little-endian hex string. -//! -//! E.g., `0` serializes as `"0x00000000"`. - -use crate::bytes_4_hex; -use serde::{Deserializer, Serializer}; - -pub fn serialize(num: &u32, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode(num.to_le_bytes())); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) -} diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs deleted file mode 100644 index e3364a2d2c9..00000000000 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Formats `u64` as a 0x-prefixed, big-endian hex string. -//! -//! E.g., `0` serializes as `"0x0000000000000000"`. 
- -use serde::de::{self, Error, Visitor}; -use serde::{Deserializer, Serializer}; -use std::fmt; - -const BYTES_LEN: usize = 8; - -pub struct QuantityVisitor; -impl<'de> Visitor<'de> for QuantityVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - - let stripped = value.trim_start_matches("0x"); - - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {}", - stripped - ))) - } else if stripped == "0" { - Ok(vec![0]) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else if stripped.len() % 2 != 0 { - hex::decode(format!("0{}", stripped)) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } else { - hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } - } -} - -pub fn serialize(num: &u64, serializer: S) -> Result -where - S: Serializer, -{ - let raw = hex::encode(num.to_be_bytes()); - let trimmed = raw.trim_start_matches('0'); - - let hex = if trimmed.is_empty() { "0" } else { trimmed }; - - serializer.serialize_str(&format!("0x{}", &hex)) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_str(QuantityVisitor)?; - - // TODO: this is not strict about byte length like other methods. - if decoded.len() > BYTES_LEN { - return Err(D::Error::custom(format!( - "expected max {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array[BYTES_LEN - decoded.len()..].copy_from_slice(&decoded); - Ok(u64::from_be_bytes(array)) -} - -#[cfg(test)] -mod test { - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: u64, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0 }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1 }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256 }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65 }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024 }).unwrap(), - "\"0x400\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0 }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65 }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024 }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs deleted file mode 100644 index 8083e1d120b..00000000000 --- a/consensus/serde_utils/src/u8_hex.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Formats `u8` as a 0x-prefixed hex string. -//! -//! E.g., `0` serializes as `"0x00"`. 
- -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -pub fn serialize(byte: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode([*byte])); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - if bytes.len() != 1 { - return Err(D::Error::custom(format!( - "expected 1 byte for u8, got {}", - bytes.len() - ))); - } - Ok(bytes[0]) -} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml deleted file mode 100644 index d39ad10875a..00000000000 --- a/consensus/ssz/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "eth2_ssz" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" -license = "Apache-2.0" - -[lib] -name = "ssz" - -[dev-dependencies] -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -smallvec = { version = "1.6.1", features = ["const_generics"] } -itertools = "0.10.3" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/README.md b/consensus/ssz/README.md deleted file mode 100644 index 04603cda33c..00000000000 --- a/consensus/ssz/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# simpleserialize (ssz) - -[](https://crates.io/crates/eth2_ssz) diff --git a/consensus/ssz/examples/large_list.rs b/consensus/ssz/examples/large_list.rs deleted file mode 100644 index a1b10ab7a3e..00000000000 --- a/consensus/ssz/examples/large_list.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. - -use ssz::{Decode, Encode}; - -fn main() { - let vec: Vec = vec![4242; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/large_list_of_structs.rs b/consensus/ssz/examples/large_list_of_structs.rs deleted file mode 100644 index 2aaaf9b8a53..00000000000 --- a/consensus/ssz/examples/large_list_of_structs.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. 
- -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; - -#[derive(Clone, Copy, Encode, Decode)] -pub struct FixedLen { - a: u64, - b: u64, - c: u64, - d: u64, -} - -fn main() { - let fixed_len = FixedLen { - a: 42, - b: 42, - c: 42, - d: 42, - }; - - let vec: Vec = vec![fixed_len; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/struct_definition.rs b/consensus/ssz/examples/struct_definition.rs deleted file mode 100644 index 123da12c58c..00000000000 --- a/consensus/ssz/examples/struct_definition.rs +++ /dev/null @@ -1,73 +0,0 @@ -use ssz::{Decode, DecodeError, Encode, SszDecoderBuilder, SszEncoder}; - -#[derive(Debug, PartialEq)] -pub struct Foo { - a: u16, - b: Vec, - c: u16, -} - -impl Encode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - ::ssz_fixed_len() - + ssz::BYTES_PER_LENGTH_OFFSET - + ::ssz_fixed_len() - + self.b.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() - + as Encode>::ssz_fixed_len() - + ::ssz_fixed_len(); - - let mut encoder = SszEncoder::container(buf, offset); - - encoder.append(&self.a); - encoder.append(&self.b); - encoder.append(&self.c); - - encoder.finalize(); - } -} - -impl Decode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - builder.register_type::()?; - builder.register_type::>()?; - builder.register_type::()?; - - let mut decoder = builder.build()?; - - Ok(Self { - a: decoder.decode_next()?, - b: decoder.decode_next()?, - c: decoder.decode_next()?, - }) - } -} - -fn main() { - let my_foo = Foo { - a: 42, - b: vec![0, 1, 2, 3], - c: 11, - }; - - let bytes = vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]; - - assert_eq!(my_foo.as_ssz_bytes(), bytes); - - let decoded_foo = Foo::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(my_foo, decoded_foo); -} diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs deleted file mode 100644 index 10b3573b169..00000000000 --- a/consensus/ssz/src/decode.rs +++ /dev/null @@ -1,374 +0,0 @@ -use super::*; -use smallvec::{smallvec, SmallVec}; -use std::cmp::Ordering; - -type SmallVec8 = SmallVec<[T; 8]>; - -pub mod impls; -pub mod try_from_iter; - -/// Returned when SSZ decoding fails. -#[derive(Debug, PartialEq, Clone)] -pub enum DecodeError { - /// The bytes supplied were too short to be decoded into the specified type. - InvalidByteLength { len: usize, expected: usize }, - /// The given bytes were too short to be read as a length prefix. - InvalidLengthPrefix { len: usize, expected: usize }, - /// A length offset pointed to a byte that was out-of-bounds (OOB). - /// - /// A bytes may be OOB for the following reasons: - /// - /// - It is `>= bytes.len()`. - /// - When decoding variable length items, the 1st offset points "backwards" into the fixed - /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). - /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. - OutOfBoundsByte { i: usize }, - /// An offset points “backwards” into the fixed-bytes portion of the message, essentially - /// double-decoding bytes that will also be decoded as fixed-length. 
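The deleted `struct_definition.rs` example neatly shows SSZ's fixed/variable split, so the 12-byte layout it asserts is worth spelling out. A std-only decode of those exact bytes:

// Byte-level breakdown of the `Foo` example above (offsets are 4-byte LE):
//   [42, 0]       -> a: u16 = 42       (fixed portion, bytes 0..2)
//   [8, 0, 0, 0]  -> offset of b = 8   (fixed portion, bytes 2..6)
//   [11, 0]       -> c: u16 = 11       (fixed portion, bytes 6..8)
//   [0, 1, 2, 3]  -> b: Vec<u8>        (variable portion, bytes 8..12)
fn main() {
    let bytes: [u8; 12] = [42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3];
    let a = u16::from_le_bytes([bytes[0], bytes[1]]);
    let offset = u32::from_le_bytes([bytes[2], bytes[3], bytes[4], bytes[5]]) as usize;
    let c = u16::from_le_bytes([bytes[6], bytes[7]]);
    let b = &bytes[offset..];
    assert_eq!((a, c, b), (42, 11, &[0u8, 1, 2, 3][..]));
}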
- /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion - OffsetIntoFixedPortion(usize), - /// The first offset does not point to the byte that follows the fixed byte portion, - /// essentially skipping a variable-length byte. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte - OffsetSkipsVariableBytes(usize), - /// An offset points to bytes prior to the previous offset. Depending on how you look at it, - /// this either double-decodes bytes or makes the first offset a negative-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing - OffsetsAreDecreasing(usize), - /// An offset references byte indices that do not exist in the source bytes. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds - OffsetOutOfBounds(usize), - /// A variable-length list does not have a fixed portion that is cleanly divisible by - /// `BYTES_PER_LENGTH_OFFSET`. - InvalidListFixedBytesLen(usize), - /// Some item has a `ssz_fixed_len` of zero. This is illegal. - ZeroLengthItem, - /// The given bytes were invalid for some application-level reason. - BytesInvalid(String), - /// The given union selector is out of bounds. - UnionSelectorInvalid(u8), -} - -/// Performs checks on the `offset` based upon the other parameters provided. -/// -/// ## Detail -/// -/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). -/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the -/// previously-read offset. Used to ensure offsets are not decreasing. -/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not -/// out of bounds. -/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure -/// that the first offset doesn't skip any variable bytes. -/// -/// ## References -/// -/// The checks here are derived from this document: -/// -/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view -pub fn sanitize_offset( - offset: usize, - previous_offset: Option, - num_bytes: usize, - num_fixed_bytes: Option, -) -> Result { - if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { - Err(DecodeError::OffsetIntoFixedPortion(offset)) - } else if previous_offset.is_none() - && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) - { - Err(DecodeError::OffsetSkipsVariableBytes(offset)) - } else if offset > num_bytes { - Err(DecodeError::OffsetOutOfBounds(offset)) - } else if previous_offset.map_or(false, |prev| prev > offset) { - Err(DecodeError::OffsetsAreDecreasing(offset)) - } else { - Ok(offset) - } -} - -/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Decode)]`. -pub trait Decode: Sized { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. 
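The four offset rules above are easiest to see exercised. Here is a local copy of the `sanitize_offset` logic with the error type collapsed to `&'static str`, plus one input per failure mode:

fn sanitize_offset(
    offset: usize,
    previous_offset: Option<usize>,
    num_bytes: usize,
    num_fixed_bytes: Option<usize>,
) -> Result<usize, &'static str> {
    if num_fixed_bytes.map_or(false, |fixed| offset < fixed) {
        Err("offset into fixed portion")
    } else if previous_offset.is_none()
        && num_fixed_bytes.map_or(false, |fixed| offset != fixed)
    {
        Err("first offset skips variable bytes")
    } else if offset > num_bytes {
        Err("offset out of bounds")
    } else if previous_offset.map_or(false, |prev| prev > offset) {
        Err("offsets are decreasing")
    } else {
        Ok(offset)
    }
}

fn main() {
    // First offset lands exactly at the end of the fixed portion: OK.
    assert!(sanitize_offset(4, None, 10, Some(4)).is_ok());
    assert_eq!(sanitize_offset(3, None, 10, Some(4)), Err("offset into fixed portion"));
    assert_eq!(sanitize_offset(11, Some(4), 10, None), Err("offset out of bounds"));
    assert_eq!(sanitize_offset(2, Some(5), 10, None), Err("offsets are decreasing"));
}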
-
-/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method.
-///
-/// See `examples/` for manual implementations or the crate root for implementations using
-/// `#[derive(Decode)]`.
-pub trait Decode: Sized {
-    /// Returns `true` if this object has a fixed-length.
-    ///
-    /// I.e., there are no variable length items in this object or any of its contained objects.
-    fn is_ssz_fixed_len() -> bool;
-
-    /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes.
-    ///
-    /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length
-    /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which
-    /// represents their length.
-    fn ssz_fixed_len() -> usize {
-        BYTES_PER_LENGTH_OFFSET
-    }
-
-    /// Attempts to decode `Self` from `bytes`, returning a `DecodeError` on failure.
-    ///
-    /// The supplied bytes must be the exact length required to decode `Self`; excess bytes will
-    /// result in an error.
-    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError>;
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct Offset {
-    position: usize,
-    offset: usize,
-}
-
-/// Builds an `SszDecoder`.
-///
-/// The purpose of this struct is to split some SSZ bytes into individual slices. The builder is
-/// then converted into a `SszDecoder` which decodes those values into object instances.
-///
-/// See [`SszDecoder`](struct.SszDecoder.html) for usage examples.
-pub struct SszDecoderBuilder<'a> {
-    bytes: &'a [u8],
-    items: SmallVec8<&'a [u8]>,
-    offsets: SmallVec8<Offset>,
-    items_index: usize,
-}
-
-impl<'a> SszDecoderBuilder<'a> {
-    /// Instantiate a new builder that should build a `SszDecoder` over the given `bytes` which
-    /// are assumed to be the SSZ encoding of some object.
-    pub fn new(bytes: &'a [u8]) -> Self {
-        Self {
-            bytes,
-            items: smallvec![],
-            offsets: smallvec![],
-            items_index: 0,
-        }
-    }
-
-    /// Registers a variable-length object as the next item in `bytes`, without specifying the
-    /// actual type.
-    ///
-    /// ## Notes
-    ///
-    /// Use of this function is generally discouraged since it cannot detect if some type changes
-    /// from variable to fixed length.
-    ///
-    /// Use `Self::register_type` wherever possible.
-    pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> {
-        struct Anonymous;
-
-        impl Decode for Anonymous {
-            fn is_ssz_fixed_len() -> bool {
-                false
-            }
-
-            fn from_ssz_bytes(_bytes: &[u8]) -> Result<Self, DecodeError> {
-                unreachable!("Anonymous should never be decoded")
-            }
-        }
-
-        self.register_type::<Anonymous>()
-    }
-
-    /// Declares that some type `T` is the next item in `bytes`.
-    pub fn register_type<T: Decode>(&mut self) -> Result<(), DecodeError> {
-        self.register_type_parameterized(T::is_ssz_fixed_len(), T::ssz_fixed_len())
-    }
-
-    /// Declares that a type with the given parameters is the next item in `bytes`.
-    pub fn register_type_parameterized(
-        &mut self,
-        is_ssz_fixed_len: bool,
-        ssz_fixed_len: usize,
-    ) -> Result<(), DecodeError> {
-        if is_ssz_fixed_len {
-            let start = self.items_index;
-            self.items_index += ssz_fixed_len;
-
-            let slice =
-                self.bytes
-                    .get(start..self.items_index)
-                    .ok_or(DecodeError::InvalidByteLength {
-                        len: self.bytes.len(),
-                        expected: self.items_index,
-                    })?;
-
-            self.items.push(slice);
-        } else {
-            self.offsets.push(Offset {
-                position: self.items.len(),
-                offset: sanitize_offset(
-                    read_offset(&self.bytes[self.items_index..])?,
-                    self.offsets.last().map(|o| o.offset),
-                    self.bytes.len(),
-                    None,
-                )?,
-            });
-
-            // Push an empty slice into items; it will be replaced later.
-            self.items.push(&[]);
-
-            self.items_index += BYTES_PER_LENGTH_OFFSET;
-        }
-
-        Ok(())
-    }
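When the concrete type is unknown at registration time, anonymous registration still lets the builder carve out the correct byte ranges; the slices can then be decoded with `decode_next_with`, defined further below. A sketch against the public API:

```rust
use ssz::{DecodeError, SszDecoderBuilder};

fn main() -> Result<(), DecodeError> {
    // Two variable-length items: offsets 8 and 10, payloads [10, 11] and [12].
    let bytes = [8, 0, 0, 0, 10, 0, 0, 0, 10, 11, 12];

    let mut builder = SszDecoderBuilder::new(&bytes);
    builder.register_anonymous_variable_length_item()?;
    builder.register_anonymous_variable_length_item()?;
    let mut decoder = builder.build()?;

    // Decode each slice with an explicit closure instead of a registered type.
    let x: Vec<u8> = decoder.decode_next_with(|slice| Ok(slice.to_vec()))?;
    let y: Vec<u8> = decoder.decode_next_with(|slice| Ok(slice.to_vec()))?;
    assert_eq!((x, y), (vec![10, 11], vec![12]));
    Ok(())
}
```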
-    fn finalize(&mut self) -> Result<(), DecodeError> {
-        if let Some(first_offset) = self.offsets.first().map(|o| o.offset) {
-            // Check to ensure the first offset points to the byte immediately following the
-            // fixed-length bytes.
-            match first_offset.cmp(&self.items_index) {
-                Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)),
-                Ordering::Greater => {
-                    return Err(DecodeError::OffsetSkipsVariableBytes(first_offset))
-                }
-                Ordering::Equal => (),
-            }
-
-            // Iterate through each pair of offsets, grabbing the slice between each of the offsets.
-            for pair in self.offsets.windows(2) {
-                let a = pair[0];
-                let b = pair[1];
-
-                self.items[a.position] = &self.bytes[a.offset..b.offset];
-            }
-
-            // Handle the last offset, pushing a slice from its start through to the end of
-            // `self.bytes`.
-            if let Some(last) = self.offsets.last() {
-                self.items[last.position] = &self.bytes[last.offset..]
-            }
-        } else {
-            // If the container is fixed-length, ensure there are no excess bytes.
-            if self.items_index != self.bytes.len() {
-                return Err(DecodeError::InvalidByteLength {
-                    len: self.bytes.len(),
-                    expected: self.items_index,
-                });
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Finalizes the builder, returning a `SszDecoder` that may be used to instantiate objects.
-    pub fn build(mut self) -> Result<SszDecoder<'a>, DecodeError> {
-        self.finalize()?;
-
-        Ok(SszDecoder { items: self.items })
-    }
-}
-
-/// Decodes some slices of SSZ into object instances. Should be instantiated using
-/// [`SszDecoderBuilder`](struct.SszDecoderBuilder.html).
-///
-/// ## Example
-///
-/// ```rust
-/// use ssz_derive::{Encode, Decode};
-/// use ssz::{Decode, Encode, SszDecoder, SszDecoderBuilder};
-///
-/// #[derive(PartialEq, Debug, Encode, Decode)]
-/// struct Foo {
-///     a: u64,
-///     b: Vec<u16>,
-/// }
-///
-/// fn ssz_decoding_example() {
-///     let foo = Foo {
-///         a: 42,
-///         b: vec![1, 3, 3, 7]
-///     };
-///
-///     let bytes = foo.as_ssz_bytes();
-///
-///     let mut builder = SszDecoderBuilder::new(&bytes);
-///
-///     builder.register_type::<u64>().unwrap();
-///     builder.register_type::<Vec<u16>>().unwrap();
-///
-///     let mut decoder = builder.build().unwrap();
-///
-///     let decoded_foo = Foo {
-///         a: decoder.decode_next().unwrap(),
-///         b: decoder.decode_next().unwrap(),
-///     };
-///
-///     assert_eq!(foo, decoded_foo);
-/// }
-///
-/// ```
-pub struct SszDecoder<'a> {
-    items: SmallVec8<&'a [u8]>,
-}
-
-impl<'a> SszDecoder<'a> {
-    /// Decodes the next item.
-    ///
-    /// # Panics
-    ///
-    /// Panics when attempting to decode more items than actually exist.
-    pub fn decode_next<T: Decode>(&mut self) -> Result<T, DecodeError> {
-        self.decode_next_with(|slice| T::from_ssz_bytes(slice))
-    }
-
-    /// Decodes the next item using the provided function.
-    pub fn decode_next_with<T, F>(&mut self, f: F) -> Result<T, DecodeError>
-    where
-        F: FnOnce(&'a [u8]) -> Result<T, DecodeError>,
-    {
-        f(self.items.remove(0))
-    }
-}
-
-/// Takes `bytes`, assuming it is the encoding for a SSZ union, and returns the union-selector and
-/// the body (trailing bytes).
-///
-/// ## Errors
-///
-/// Returns an error if:
-///
-/// - `bytes` is empty.
-/// - the union selector is not a valid value (i.e., larger than the maximum number of variants).
-pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeError> {
-    let selector = bytes
-        .first()
-        .copied()
-        .ok_or(DecodeError::OutOfBoundsByte { i: 0 })
-        .and_then(UnionSelector::new)?;
-    let body = bytes
-        .get(1..)
-        .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?;
-    Ok((selector, body))
-}
-
-/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >=
-/// BYTES_PER_LENGTH_OFFSET`.
-pub fn read_offset(bytes: &[u8]) -> Result<usize, DecodeError> {
-    decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or(
-        DecodeError::InvalidLengthPrefix {
-            len: bytes.len(),
-            expected: BYTES_PER_LENGTH_OFFSET,
-        },
-    )?)
-}
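For instance, using the crate-root re-export of `read_offset` (a sketch): the first four bytes decode as a little-endian offset, and anything shorter is rejected.

```rust
use ssz::{read_offset, DecodeError};

fn main() {
    // The little-endian prefix [8, 0, 0, 0] reads as offset 8; trailing bytes
    // are ignored by `read_offset` itself.
    assert_eq!(read_offset(&[8, 0, 0, 0, 42, 42]), Ok(8));

    // Fewer than BYTES_PER_LENGTH_OFFSET (4) bytes cannot hold a length prefix.
    assert_eq!(
        read_offset(&[8, 0]),
        Err(DecodeError::InvalidLengthPrefix { len: 2, expected: 4 })
    );
}
```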
-/// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() !=
-/// BYTES_PER_LENGTH_OFFSET`.
-fn decode_offset(bytes: &[u8]) -> Result<usize, DecodeError> {
-    let len = bytes.len();
-    let expected = BYTES_PER_LENGTH_OFFSET;
-
-    if len != expected {
-        Err(DecodeError::InvalidLengthPrefix { len, expected })
-    } else {
-        let mut array: [u8; BYTES_PER_LENGTH_OFFSET] = std::default::Default::default();
-        array.clone_from_slice(bytes);
-
-        Ok(u32::from_le_bytes(array) as usize)
-    }
-}
diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs
deleted file mode 100644
index 3d36fb4379e..00000000000
--- a/consensus/ssz/src/decode/impls.rs
+++ /dev/null
@@ -1,776 +0,0 @@
-use super::*;
-use crate::decode::try_from_iter::{TryCollect, TryFromIter};
-use core::num::NonZeroUsize;
-use ethereum_types::{H160, H256, U128, U256};
-use itertools::process_results;
-use smallvec::SmallVec;
-use std::collections::{BTreeMap, BTreeSet};
-use std::iter::{self, FromIterator};
-use std::sync::Arc;
-
-macro_rules! impl_decodable_for_uint {
-    ($type: ident, $bit_size: expr) => {
-        impl Decode for $type {
-            fn is_ssz_fixed_len() -> bool {
-                true
-            }
-
-            fn ssz_fixed_len() -> usize {
-                $bit_size / 8
-            }
-
-            fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
-                let len = bytes.len();
-                let expected = <Self as Decode>::ssz_fixed_len();
-
-                if len != expected {
-                    Err(DecodeError::InvalidByteLength { len, expected })
-                } else {
-                    let mut array: [u8; $bit_size / 8] = std::default::Default::default();
-                    array.clone_from_slice(bytes);
-
-                    Ok(Self::from_le_bytes(array))
-                }
-            }
-        }
-    };
-}
-
-impl_decodable_for_uint!(u8, 8);
-impl_decodable_for_uint!(u16, 16);
-impl_decodable_for_uint!(u32, 32);
-impl_decodable_for_uint!(u64, 64);
-
-#[cfg(target_pointer_width = "32")]
-impl_decodable_for_uint!(usize, 32);
-
-#[cfg(target_pointer_width = "64")]
-impl_decodable_for_uint!(usize, 64);
-
-macro_rules! impl_decode_for_tuples {
-    ($(
-        $Tuple:ident {
-            $(($idx:tt) -> $T:ident)+
-        }
-    )+) => {
-        $(
-            impl<$($T: Decode),+> Decode for ($($T,)+) {
-                fn is_ssz_fixed_len() -> bool {
-                    $(
-                        <$T as Decode>::is_ssz_fixed_len() &&
-                    )*
-                    true
-                }
-
-                fn ssz_fixed_len() -> usize {
-                    if <Self as Decode>::is_ssz_fixed_len() {
-                        $(
-                            <$T as Decode>::ssz_fixed_len() +
-                        )*
-                        0
-                    } else {
-                        BYTES_PER_LENGTH_OFFSET
-                    }
-                }
-
-                fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
-                    let mut builder = SszDecoderBuilder::new(bytes);
-
-                    $(
-                        builder.register_type::<$T>()?;
-                    )*
-
-                    let mut decoder = builder.build()?;
-
-                    Ok(($(
-                        decoder.decode_next::<$T>()?,
-                    )*
-                    ))
-                }
-            }
-        )+
-    }
-}
-
-impl_decode_for_tuples!
{ - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl Decode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - match bytes[0] { - 0b0000_0000 => Ok(false), - 0b0000_0001 => Ok(true), - _ => Err(DecodeError::BytesInvalid(format!( - "Out-of-range for boolean: {}", - bytes[0] - ))), - } - } - } -} - -impl Decode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let x = usize::from_ssz_bytes(bytes)?; - - if x == 0 { - Err(DecodeError::BytesInvalid( - "NonZeroUsize cannot be zero.".to_string(), - )) - } else { - // `unwrap` is safe here as `NonZeroUsize::new()` succeeds if `x > 0` and this path - // never executes when `x == 0`. 
- Ok(NonZeroUsize::new(x).unwrap()) - } - } -} - -impl Decode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let (selector, body) = split_union_bytes(bytes)?; - match selector.into() { - 0u8 => Ok(None), - 1u8 => ::from_ssz_bytes(body).map(Option::Some), - other => Err(DecodeError::UnionSelectorInvalid(other)), - } - } -} - -impl Decode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - T::from_ssz_bytes(bytes).map(Arc::new) - } -} - -impl Decode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(Self::from_slice(bytes)) - } - } -} - -impl Decode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(H256::from_slice(bytes)) - } - } -} - -impl Decode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(U256::from_little_endian(bytes)) - } - } -} - -impl Decode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(U128::from_little_endian(bytes)) - } - } -} - -macro_rules! impl_decodable_for_u8_array { - ($len: expr) => { - impl Decode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - let mut array: [u8; $len] = [0; $len]; - array.copy_from_slice(bytes); - - Ok(array) - } - } - } - }; -} - -impl_decodable_for_u8_array!(4); -impl_decodable_for_u8_array!(32); -impl_decodable_for_u8_array!(48); - -macro_rules! 
impl_for_vec { - ($type: ty, $max_len: expr) => { - impl Decode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, $max_len) - } - } - } - }; -} - -impl_for_vec!(Vec, None); -impl_for_vec!(SmallVec<[T; 1]>, None); -impl_for_vec!(SmallVec<[T; 2]>, None); -impl_for_vec!(SmallVec<[T; 3]>, None); -impl_for_vec!(SmallVec<[T; 4]>, None); -impl_for_vec!(SmallVec<[T; 5]>, None); -impl_for_vec!(SmallVec<[T; 6]>, None); -impl_for_vec!(SmallVec<[T; 7]>, None); -impl_for_vec!(SmallVec<[T; 8]>, None); - -impl Decode for BTreeMap -where - K: Decode + Ord, - V: Decode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if <(K, V)>::is_ssz_fixed_len() { - bytes - .chunks(<(K, V)>::ssz_fixed_len()) - .map(<(K, V)>::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -impl Decode for BTreeSet -where - T: Decode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -/// Decodes `bytes` as if it were a list of variable-length items. -/// -/// The `ssz::SszDecoder` can also perform this functionality, however this function is -/// significantly faster as it is optimized to read same-typed items whilst `ssz::SszDecoder` -/// supports reading items of differing types. -pub fn decode_list_of_variable_length_items>( - bytes: &[u8], - max_len: Option, -) -> Result { - if bytes.is_empty() { - return Container::try_from_iter(iter::empty()).map_err(|e| { - DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e)) - }); - } - - let first_offset = read_offset(bytes)?; - sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; - - if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); - } - - let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; - - if max_len.map_or(false, |max| num_items > max) { - return Err(DecodeError::BytesInvalid(format!( - "Variable length list of {} items exceeds maximum of {:?}", - num_items, max_len - ))); - } - - let mut offset = first_offset; - process_results( - (1..=num_items).map(|i| { - let slice_option = if i == num_items { - bytes.get(offset..) - } else { - let start = offset; - - let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; - offset = - sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - - bytes.get(start..offset) - }; - - let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; - T::from_ssz_bytes(slice) - }), - |iter| iter.try_collect(), - )? - .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e))) -} - -#[cfg(test)] -mod tests { - use super::*; - - // Note: decoding of valid bytes is generally tested "indirectly" in the `/tests` dir, by - // encoding then decoding the element. 
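To see the offset arithmetic of `decode_list_of_variable_length_items` end-to-end, here is a sketch using the crate-root re-export (the byte layout mirrors the list tests below):

```rust
use ssz::{decode_list_of_variable_length_items, DecodeError};

fn main() -> Result<(), DecodeError> {
    // Fixed portion: two 4-byte offsets (8 and 9); payloads [1] and [2, 3].
    let bytes = [8, 0, 0, 0, 9, 0, 0, 0, 1, 2, 3];

    let items: Vec<Vec<u8>> = decode_list_of_variable_length_items(&bytes, None)?;
    assert_eq!(items, vec![vec![1], vec![2, 3]]);

    // A `max_len` smaller than the number of items is rejected.
    let too_many: Result<Vec<Vec<u8>>, _> =
        decode_list_of_variable_length_items(&bytes, Some(1));
    assert!(too_many.is_err());
    Ok(())
}
```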
- - #[test] - fn invalid_u8_array_4() { - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 3]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 4 - }) - ); - - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 5]), - Err(DecodeError::InvalidByteLength { - len: 5, - expected: 4 - }) - ); - } - - #[test] - fn invalid_bool() { - assert_eq!( - bool::from_ssz_bytes(&[0; 2]), - Err(DecodeError::InvalidByteLength { - len: 2, - expected: 1 - }) - ); - - assert_eq!( - bool::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 1 - }) - ); - - if let Err(DecodeError::BytesInvalid(_)) = bool::from_ssz_bytes(&[2]) { - // Success. - } else { - panic!("Did not return error on invalid bool val") - } - } - - #[test] - fn invalid_h256() { - assert_eq!( - H256::from_ssz_bytes(&[0; 33]), - Err(DecodeError::InvalidByteLength { - len: 33, - expected: 32 - }) - ); - - assert_eq!( - H256::from_ssz_bytes(&[0; 31]), - Err(DecodeError::InvalidByteLength { - len: 31, - expected: 32 - }) - ); - } - - #[test] - fn empty_list() { - let vec: Vec> = vec![]; - let bytes = vec.as_ssz_bytes(); - assert!(bytes.is_empty()); - assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); - } - - #[test] - fn first_length_points_backwards() { - assert_eq!( - >>::from_ssz_bytes(&[0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(0)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[1, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(1)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[2, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(2)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[3, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(3)) - ); - } - - #[test] - fn lengths_are_decreasing() { - assert_eq!( - >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), - Err(DecodeError::OffsetsAreDecreasing(12)) - ); - } - - #[test] - fn awkward_fixed_length_portion() { - assert_eq!( - >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(10)) - ); - } - - #[test] - fn length_out_of_bounds() { - assert_eq!( - >>::from_ssz_bytes(&[5, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(5)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(9)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(16)) - ); - } - - #[test] - fn vec_of_vec_of_u16() { - assert_eq!( - >>::from_ssz_bytes(&[4, 0, 0, 0]), - Ok(vec![vec![]]) - ); - - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn vec_of_u16() { - assert_eq!(>::from_ssz_bytes(&[0, 0, 0, 0]), Ok(vec![0, 0])); - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, 
- expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn u16() { - assert_eq!(::from_ssz_bytes(&[0, 0]), Ok(0)); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn tuple() { - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 0, 0, 0]), Ok((0, 0))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[16, 0, 17, 0]), Ok((16, 17))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 1, 2, 0]), Ok((256, 2))); - assert_eq!( - <(u16, u16)>::from_ssz_bytes(&[255, 255, 0, 0]), - Ok((65535, 0)) - ); - } -} diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs deleted file mode 100644 index 1ff89a107f4..00000000000 --- a/consensus/ssz/src/decode/try_from_iter.rs +++ /dev/null @@ -1,103 +0,0 @@ -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::convert::Infallible; -use std::fmt::Debug; - -/// Partial variant of `std::iter::FromIterator`. -/// -/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ -/// values, but which may refuse values once a length limit is reached. -pub trait TryFromIter: Sized { - type Error: Debug; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator; -} - -// It would be nice to be able to do a blanket impl, e.g. -// -// `impl TryFromIter for C where C: FromIterator` -// -// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. -// -// E.g. If we added an impl downstream for `List` then another crate downstream of that -// could legally add an impl of `FromIterator for List` which would create -// two conflicting implementations for `List`. Hence the `List` impl is disallowed -// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to -// abandon the blanket impl in favour of impls for selected types. -impl TryFromIter for Vec { - type Error = Infallible; - - fn try_from_iter(values: I) -> Result - where - I: IntoIterator, - { - // Pre-allocate the expected size of the Vec, which is parsed from the SSZ input bytes as - // `num_items`. This length has already been checked to be less than or equal to the type's - // maximum length in `decode_list_of_variable_length_items`. 
- let iter = values.into_iter(); - let (_, opt_max_len) = iter.size_hint(); - let mut vec = Vec::with_capacity(opt_max_len.unwrap_or(0)); - vec.extend(iter); - Ok(vec) - } -} - -impl TryFromIter for SmallVec<[T; N]> { - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter<(K, V)> for BTreeMap -where - K: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter for BTreeSet -where - T: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -/// Partial variant of `collect`. -pub trait TryCollect: Iterator { - fn try_collect(self) -> Result - where - C: TryFromIter; -} - -impl TryCollect for I -where - I: Iterator, -{ - fn try_collect(self) -> Result - where - C: TryFromIter, - { - C::try_from_iter(self) - } -} diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs deleted file mode 100644 index a46ef80e05c..00000000000 --- a/consensus/ssz/src/encode.rs +++ /dev/null @@ -1,196 +0,0 @@ -use super::*; - -mod impls; - -/// Provides SSZ encoding (serialization) via the `as_ssz_bytes(&self)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Encode)]`. -pub trait Encode { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// Append the encoding `self` to `buf`. - /// - /// Note, variable length objects need only to append their "variable length" portion, they do - /// not need to provide their offset. - fn ssz_append(&self, buf: &mut Vec); - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Returns the size (in bytes) when `self` is serialized. - /// - /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more - /// efficient. - fn ssz_bytes_len(&self) -> usize; - - /// Returns the full-form encoding of this object. - /// - /// The default implementation of this method should suffice for most cases. - fn as_ssz_bytes(&self) -> Vec { - let mut buf = vec![]; - - self.ssz_append(&mut buf); - - buf - } -} - -/// Allow for encoding an ordered series of distinct or indistinct objects as SSZ bytes. -/// -/// **You must call `finalize(..)` after the final `append(..)` call** to ensure the bytes are -/// written to `buf`. 
-///
-/// ## Example
-///
-/// Use `SszEncoder` to produce identical output to `foo.as_ssz_bytes()`:
-///
-/// ```rust
-/// use ssz_derive::{Encode, Decode};
-/// use ssz::{Decode, Encode, SszEncoder};
-///
-/// #[derive(PartialEq, Debug, Encode, Decode)]
-/// struct Foo {
-///     a: u64,
-///     b: Vec<u16>,
-/// }
-///
-/// fn ssz_encode_example() {
-///     let foo = Foo {
-///         a: 42,
-///         b: vec![1, 3, 3, 7]
-///     };
-///
-///     let mut buf: Vec<u8> = vec![];
-///     let offset = <u64 as Encode>::ssz_fixed_len() + <Vec<u16> as Encode>::ssz_fixed_len();
-///
-///     let mut encoder = SszEncoder::container(&mut buf, offset);
-///
-///     encoder.append(&foo.a);
-///     encoder.append(&foo.b);
-///
-///     encoder.finalize();
-///
-///     assert_eq!(foo.as_ssz_bytes(), buf);
-/// }
-///
-/// ```
-pub struct SszEncoder<'a> {
-    offset: usize,
-    buf: &'a mut Vec<u8>,
-    variable_bytes: Vec<u8>,
-}
-
-impl<'a> SszEncoder<'a> {
-    /// Instantiate a new encoder for encoding a SSZ container.
-    pub fn container(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
-        buf.reserve(num_fixed_bytes);
-
-        Self {
-            offset: num_fixed_bytes,
-            buf,
-            variable_bytes: vec![],
-        }
-    }
-
-    /// Append some `item` to the SSZ bytes.
-    pub fn append<T: Encode>(&mut self, item: &T) {
-        self.append_parameterized(T::is_ssz_fixed_len(), |buf| item.ssz_append(buf))
-    }
-
-    /// Uses `ssz_append` to append the encoding of some item to the SSZ bytes.
-    pub fn append_parameterized<F>(&mut self, is_ssz_fixed_len: bool, ssz_append: F)
-    where
-        F: Fn(&mut Vec<u8>),
-    {
-        if is_ssz_fixed_len {
-            ssz_append(self.buf);
-        } else {
-            self.buf
-                .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len()));
-
-            ssz_append(&mut self.variable_bytes);
-        }
-    }
-
-    /// Write the variable bytes to `self.bytes`.
-    ///
-    /// This method must be called after the final `append(..)` call when serializing
-    /// variable-length items.
-    pub fn finalize(&mut self) -> &mut Vec<u8> {
-        self.buf.append(&mut self.variable_bytes);
-
-        self.buf
-    }
-}
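The running offset in `append_parameterized` is easiest to see with two variable-length fields: each offset written equals the fixed-portion size plus the variable bytes buffered so far. A sketch using the derive macro (the `Pair` struct is hypothetical):

```rust
use ssz::Encode;
use ssz_derive::Encode;

#[derive(Encode)]
struct Pair {
    x: Vec<u8>,
    y: Vec<u8>,
}

fn main() {
    let pair = Pair { x: vec![1], y: vec![2, 3] };
    // Fixed portion: two 4-byte offsets. The first points just past the fixed
    // portion (8); the second also accounts for x's one payload byte (9).
    assert_eq!(pair.as_ssz_bytes(), vec![8, 0, 0, 0, 9, 0, 0, 0, 1, 2, 3]);
}
```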
-/// Encode `len` as a little-endian byte array of `BYTES_PER_LENGTH_OFFSET` length.
-///
-/// If `len` is larger than `MAX_LENGTH_VALUE`, a `debug_assert` is raised.
-pub fn encode_length(len: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] {
-    // Note: it is possible for `len` to be larger than what can be encoded in
-    // `BYTES_PER_LENGTH_OFFSET` bytes, triggering this debug assertion.
-    //
-    // These are the alternatives to using a `debug_assert` here:
-    //
-    // 1. Use `assert`.
-    // 2. Push an error to the caller (e.g., `Option` or `Result`).
-    // 3. Ignore it completely.
-    //
-    // I have avoided (1) because it's basically a choice between "produce invalid SSZ" or "kill
-    // the entire program". I figure it may be possible for an attacker to trigger this assert and
-    // take the program down -- I think producing invalid SSZ is a better option than this.
-    //
-    // I have avoided (2) because this error will need to be propagated upstream, making encoding a
-    // function which may fail. I don't think this is ergonomic and the upsides don't outweigh the
-    // downsides.
-    //
-    // I figure a `debug_assertion` is better than (3) as it will give us a chance to detect the
-    // error during testing.
-    //
-    // If you have a different opinion, feel free to start an issue and tag @paulhauner.
-    debug_assert!(len <= MAX_LENGTH_VALUE);
-
-    let mut bytes = [0; BYTES_PER_LENGTH_OFFSET];
-    bytes.copy_from_slice(&len.to_le_bytes()[0..BYTES_PER_LENGTH_OFFSET]);
-    bytes
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_encode_length() {
-        assert_eq!(encode_length(0), [0; 4]);
-
-        assert_eq!(encode_length(1), [1, 0, 0, 0]);
-
-        assert_eq!(
-            encode_length(MAX_LENGTH_VALUE),
-            [255; BYTES_PER_LENGTH_OFFSET]
-        );
-    }
-
-    #[test]
-    #[should_panic]
-    #[cfg(debug_assertions)]
-    fn test_encode_length_above_max_debug_panics() {
-        encode_length(MAX_LENGTH_VALUE + 1);
-    }
-
-    #[test]
-    #[cfg(not(debug_assertions))]
-    fn test_encode_length_above_max_not_debug_does_not_panic() {
-        assert_eq!(&encode_length(MAX_LENGTH_VALUE + 1)[..], &[0; 4]);
-    }
-}
diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs
deleted file mode 100644
index 8c609d93976..00000000000
--- a/consensus/ssz/src/encode/impls.rs
+++ /dev/null
@@ -1,633 +0,0 @@
-use super::*;
-use core::num::NonZeroUsize;
-use ethereum_types::{H160, H256, U128, U256};
-use smallvec::SmallVec;
-use std::collections::{BTreeMap, BTreeSet};
-use std::sync::Arc;
-
-macro_rules! impl_encodable_for_uint {
-    ($type: ident, $bit_size: expr) => {
-        impl Encode for $type {
-            fn is_ssz_fixed_len() -> bool {
-                true
-            }
-
-            fn ssz_fixed_len() -> usize {
-                $bit_size / 8
-            }
-
-            fn ssz_bytes_len(&self) -> usize {
-                $bit_size / 8
-            }
-
-            fn ssz_append(&self, buf: &mut Vec<u8>) {
-                buf.extend_from_slice(&self.to_le_bytes());
-            }
-        }
-    };
-}
-
-impl_encodable_for_uint!(u8, 8);
-impl_encodable_for_uint!(u16, 16);
-impl_encodable_for_uint!(u32, 32);
-impl_encodable_for_uint!(u64, 64);
-
-#[cfg(target_pointer_width = "32")]
-impl_encodable_for_uint!(usize, 32);
-
-#[cfg(target_pointer_width = "64")]
-impl_encodable_for_uint!(usize, 64);
-
-// Based on the `tuple_impls` macro from the standard library.
-macro_rules! impl_encode_for_tuples {
-    ($(
-        $Tuple:ident {
-            $(($idx:tt) -> $T:ident)+
-        }
-    )+) => {
-        $(
-            impl<$($T: Encode),+> Encode for ($($T,)+) {
-                fn is_ssz_fixed_len() -> bool {
-                    $(
-                        <$T as Encode>::is_ssz_fixed_len() &&
-                    )*
-                    true
-                }
-
-                fn ssz_fixed_len() -> usize {
-                    if <Self as Encode>::is_ssz_fixed_len() {
-                        $(
-                            <$T as Encode>::ssz_fixed_len() +
-                        )*
-                        0
-                    } else {
-                        BYTES_PER_LENGTH_OFFSET
-                    }
-                }
-
-                fn ssz_bytes_len(&self) -> usize {
-                    if <Self as Encode>::is_ssz_fixed_len() {
-                        <Self as Encode>::ssz_fixed_len()
-                    } else {
-                        let mut len = 0;
-                        $(
-                            len += if <$T as Encode>::is_ssz_fixed_len() {
-                                <$T as Encode>::ssz_fixed_len()
-                            } else {
-                                BYTES_PER_LENGTH_OFFSET +
-                                self.$idx.ssz_bytes_len()
-                            };
-                        )*
-                        len
-                    }
-                }
-
-                fn ssz_append(&self, buf: &mut Vec<u8>) {
-                    let offset = $(
-                        <$T as Encode>::ssz_fixed_len() +
-                    )*
-                    0;
-
-                    let mut encoder = SszEncoder::container(buf, offset);
-
-                    $(
-                        encoder.append(&self.$idx);
-                    )*
-
-                    encoder.finalize();
-                }
-            }
-        )+
-    }
-}
-
-impl_encode_for_tuples!
{ - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl Encode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn ssz_append(&self, buf: &mut Vec) { - match self { - Option::None => { - let union_selector: u8 = 0u8; - buf.push(union_selector); - } - Option::Some(ref inner) => { - let union_selector: u8 = 1u8; - buf.push(union_selector); - inner.ssz_append(buf); - } - } - } - fn ssz_bytes_len(&self) -> usize { - match self { - Option::None => 1usize, - Option::Some(ref inner) => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - } - } -} - -impl Encode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.as_ref().ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.as_ref().ssz_bytes_len() - } -} - -// Encode transparently through references. -impl<'a, T: Encode> Encode for &'a T { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - T::ssz_append(self, buf) - } - - fn ssz_bytes_len(&self) -> usize { - T::ssz_bytes_len(self) - } -} - -/// Compute the encoded length of a vector-like sequence of `T`. -pub fn sequence_ssz_bytes_len(iter: I) -> usize -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - // Compute length before doing any iteration. - let length = iter.len(); - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() * length - } else { - let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * length; - len - } -} - -/// Encode a vector-like sequence of `T`. -pub fn sequence_ssz_append(iter: I, buf: &mut Vec) -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * iter.len()); - - for item in iter { - item.ssz_append(buf); - } - } else { - let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); - - for item in iter { - encoder.append(&item); - } - - encoder.finalize(); - } -} - -macro_rules! 
impl_for_vec { - ($type: ty) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } - } - }; -} - -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); - -impl Encode for BTreeMap -where - K: Encode + Ord, - V: Encode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for BTreeSet -where - T: Encode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn ssz_bytes_len(&self) -> usize { - 1 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&(*self as u8).to_le_bytes()); - } -} - -impl Encode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - std::mem::size_of::() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.get().ssz_append(buf) - } -} - -impl Encode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn ssz_bytes_len(&self) -> usize { - 20 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -impl Encode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn ssz_bytes_len(&self) -> usize { - 16 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -macro_rules! 
impl_encodable_for_u8_array { - ($len: expr) => { - impl Encode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn ssz_bytes_len(&self) -> usize { - $len - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self[..]); - } - } - }; -} - -impl_encodable_for_u8_array!(4); -impl_encodable_for_u8_array!(32); -impl_encodable_for_u8_array!(48); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn vec_of_u8() { - let vec: Vec = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec = vec![1]; - assert_eq!(vec.as_ssz_bytes(), vec![1]); - - let vec: Vec = vec![0, 1, 2, 3]; - assert_eq!(vec.as_ssz_bytes(), vec![0, 1, 2, 3]); - } - - #[test] - fn vec_of_vec_of_u8() { - let vec: Vec> = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec> = vec![vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![4, 0, 0, 0]); - - let vec: Vec> = vec![vec![], vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![8, 0, 0, 0, 8, 0, 0, 0]); - - let vec: Vec> = vec![vec![0, 1, 2], vec![11, 22, 33]]; - assert_eq!( - vec.as_ssz_bytes(), - vec![8, 0, 0, 0, 11, 0, 0, 0, 0, 1, 2, 11, 22, 33] - ); - } - - #[test] - fn ssz_encode_u8() { - assert_eq!(0_u8.as_ssz_bytes(), vec![0]); - assert_eq!(1_u8.as_ssz_bytes(), vec![1]); - assert_eq!(100_u8.as_ssz_bytes(), vec![100]); - assert_eq!(255_u8.as_ssz_bytes(), vec![255]); - } - - #[test] - fn ssz_encode_u16() { - assert_eq!(1_u16.as_ssz_bytes(), vec![1, 0]); - assert_eq!(100_u16.as_ssz_bytes(), vec![100, 0]); - assert_eq!((1_u16 << 8).as_ssz_bytes(), vec![0, 1]); - assert_eq!(65535_u16.as_ssz_bytes(), vec![255, 255]); - } - - #[test] - fn ssz_encode_u32() { - assert_eq!(1_u32.as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!(100_u32.as_ssz_bytes(), vec![100, 0, 0, 0]); - assert_eq!((1_u32 << 16).as_ssz_bytes(), vec![0, 0, 1, 0]); - assert_eq!((1_u32 << 24).as_ssz_bytes(), vec![0, 0, 0, 1]); - assert_eq!((!0_u32).as_ssz_bytes(), vec![255, 255, 255, 255]); - } - - #[test] - fn ssz_encode_u64() { - assert_eq!(1_u64.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_u64).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_usize() { - assert_eq!(1_usize.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_usize).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_option_u8() { - let opt: Option = None; - assert_eq!(opt.as_ssz_bytes(), vec![0]); - let opt: Option = Some(2); - assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); - } - - #[test] - fn ssz_encode_bool() { - assert_eq!(true.as_ssz_bytes(), vec![1]); - assert_eq!(false.as_ssz_bytes(), vec![0]); - } - - #[test] - fn ssz_encode_h256() { - assert_eq!(H256::from(&[0; 32]).as_ssz_bytes(), vec![0; 32]); - assert_eq!(H256::from(&[1; 32]).as_ssz_bytes(), vec![1; 32]); - - let bytes = vec![ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - - assert_eq!(H256::from_slice(&bytes).as_ssz_bytes(), bytes); - } - - #[test] - fn ssz_encode_u8_array_4() { - assert_eq!([0, 0, 0, 0].as_ssz_bytes(), vec![0; 4]); - assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]); - } - - #[test] - fn tuple() { - assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]); - assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]); - assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]); - } -} diff --git 
a/consensus/ssz/src/legacy.rs b/consensus/ssz/src/legacy.rs deleted file mode 100644 index 4953db057de..00000000000 --- a/consensus/ssz/src/legacy.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Provides a "legacy" version of SSZ encoding for `Option where T: Encode + Decode`. -//! -//! The SSZ specification changed in 2021 to use a 1-byte union selector, instead of a 4-byte one -//! which was used in the Lighthouse database. -//! -//! Users can use the `four_byte_option_impl` macro to define a module that can be used with the -//! `#[ssz(with = "module")]`. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::four_byte_option_impl; -//! -//! four_byte_option_impl!(impl_for_u64, u64); -//! -//! #[derive(Encode, Decode)] -//! struct Foo { -//! #[ssz(with = "impl_for_u64")] -//! a: Option, -//! } -//! ``` - -use crate::*; - -#[macro_export] -macro_rules! four_byte_option_impl { - ($mod_name: ident, $type: ty) => { - #[allow(dead_code)] - mod $mod_name { - use super::*; - - pub mod encode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn ssz_bytes_len(opt: &Option<$type>) -> usize { - if let Some(some) = opt { - let len = if <$type as Encode>::is_ssz_fixed_len() { - <$type as Encode>::ssz_fixed_len() - } else { - <$type as Encode>::ssz_bytes_len(some) - }; - len + BYTES_PER_LENGTH_OFFSET - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - pub fn ssz_append(opt: &Option<$type>, buf: &mut Vec) { - match opt { - None => buf.extend_from_slice(&legacy::encode_four_byte_union_selector(0)), - Some(t) => { - buf.extend_from_slice(&legacy::encode_four_byte_union_selector(1)); - t.ssz_append(buf); - } - } - } - - pub fn as_ssz_bytes(opt: &Option<$type>) -> Vec { - let mut buf = vec![]; - - ssz_append(opt, &mut buf); - - buf - } - } - - pub mod decode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { - if bytes.len() < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }); - } - - let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); - - let index = legacy::read_four_byte_union_selector(index_bytes)?; - if index == 0 { - Ok(None) - } else if index == 1 { - Ok(Some(<$type as ssz::Decode>::from_ssz_bytes(value_bytes)?)) - } else { - Err(DecodeError::BytesInvalid(format!( - "{} is not a valid union index for Option", - index - ))) - } - } - } - } - }; -} - -pub fn encode_four_byte_union_selector(selector: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - encode_length(selector) -} - -pub fn read_four_byte_union_selector(bytes: &[u8]) -> Result { - read_offset(bytes) -} - -#[cfg(test)] -mod test { - use super::*; - use crate as ssz; - use ssz_derive::{Decode, Encode}; - - type VecU16 = Vec; - - four_byte_option_impl!(impl_u16, u16); - four_byte_option_impl!(impl_vec_u16, VecU16); - - #[test] - fn ssz_encode_option_u16() { - let item = Some(65535_u16); - let bytes = vec![1, 0, 0, 0, 255, 255]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - 
assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), None); - } - - #[test] - fn ssz_encode_option_vec_u16() { - let item = Some(vec![0_u16, 1]); - let bytes = vec![1, 0, 0, 0, 0, 0, 1, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - } - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct TwoVariableLenOptions { - a: u16, - #[ssz(with = "impl_u16")] - b: Option, - #[ssz(with = "impl_vec_u16")] - c: Option>, - #[ssz(with = "impl_vec_u16")] - d: Option>, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn two_variable_len_options_encoding() { - let s = TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }; - - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 - // | option | offset | offset | option = vec![ - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: Some(vec![1]), - }, - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: None, - d: None, - }, - ]; - - round_trip(vec); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } -} diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs deleted file mode 100644 index e71157a3eed..00000000000 --- a/consensus/ssz/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) -//! format designed for use in Ethereum 2.0. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::{Decode, Encode}; -//! -//! #[derive(PartialEq, Debug, Encode, Decode)] -//! struct Foo { -//! a: u64, -//! b: Vec, -//! } -//! -//! fn ssz_encode_decode_example() { -//! let foo = Foo { -//! a: 42, -//! b: vec![1, 3, 3, 7] -//! }; -//! -//! let ssz_bytes: Vec = foo.as_ssz_bytes(); -//! -//! let decoded_foo = Foo::from_ssz_bytes(&ssz_bytes).unwrap(); -//! -//! assert_eq!(foo, decoded_foo); -//! } -//! -//! ``` -//! -//! See `examples/` for manual implementations of the `Encode` and `Decode` traits. 
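As a sanity check on the constants declared just below (a sketch, not part of the crate's test suite): with 4-byte offsets the maximum encodable length is `u32::MAX`, and union selectors above 127 are reserved.

```rust
use ssz::{BYTES_PER_LENGTH_OFFSET, MAX_LENGTH_VALUE, MAX_UNION_SELECTOR};

fn main() {
    assert_eq!(BYTES_PER_LENGTH_OFFSET, 4);
    // On a 64-bit target: u64::MAX >> (8 * (8 - 4)) == u32::MAX.
    assert_eq!(MAX_LENGTH_VALUE, u32::MAX as usize);
    assert_eq!(MAX_UNION_SELECTOR, 127);
}
```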
-
-mod decode;
-mod encode;
-pub mod legacy;
-mod union_selector;
-
-pub use decode::{
-    impls::decode_list_of_variable_length_items, read_offset, split_union_bytes,
-    try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder,
-};
-pub use encode::{encode_length, Encode, SszEncoder};
-pub use union_selector::UnionSelector;
-
-/// The number of bytes used to represent an offset.
-pub const BYTES_PER_LENGTH_OFFSET: usize = 4;
-/// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`.
-#[cfg(target_pointer_width = "32")]
-pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> (8 * (4 - BYTES_PER_LENGTH_OFFSET))) as usize;
-#[cfg(target_pointer_width = "64")]
-pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET))) as usize;
-
-/// The number of bytes used to indicate the variant of a union.
-pub const BYTES_PER_UNION_SELECTOR: usize = 1;
-/// The highest possible union selector value (higher values are reserved for backwards compatible
-/// extensions).
-pub const MAX_UNION_SELECTOR: u8 = 127;
-
-/// Convenience function to SSZ encode an object supporting ssz::Encode.
-///
-/// Equivalent to `val.as_ssz_bytes()`.
-pub fn ssz_encode<T>(val: &T) -> Vec<u8>
-where
-    T: Encode,
-{
-    val.as_ssz_bytes()
-}
diff --git a/consensus/ssz/src/union_selector.rs b/consensus/ssz/src/union_selector.rs
deleted file mode 100644
index 18bab094aab..00000000000
--- a/consensus/ssz/src/union_selector.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use crate::*;
-
-/// Provides the one-byte "selector" from the SSZ union specification:
-///
-/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union
-#[derive(Copy, Clone)]
-pub struct UnionSelector(u8);
-
-impl From<UnionSelector> for u8 {
-    fn from(union_selector: UnionSelector) -> u8 {
-        union_selector.0
-    }
-}
-
-impl PartialEq<u8> for UnionSelector {
-    fn eq(&self, other: &u8) -> bool {
-        self.0 == *other
-    }
-}
-
-impl UnionSelector {
-    /// Instantiate `self`, returning an error if `selector > MAX_UNION_SELECTOR`.
- pub fn new(selector: u8) -> Result { - Some(selector) - .filter(|_| selector <= MAX_UNION_SELECTOR) - .map(Self) - .ok_or(DecodeError::UnionSelectorInvalid(selector)) - } -} diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs deleted file mode 100644 index f52d2c5cdfe..00000000000 --- a/consensus/ssz/tests/tests.rs +++ /dev/null @@ -1,390 +0,0 @@ -use ethereum_types::H256; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; - -mod round_trip { - use super::*; - use std::collections::BTreeMap; - use std::iter::FromIterator; - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[test] - fn bool() { - let items: Vec = vec![true, false]; - - round_trip(items); - } - - #[test] - fn option_u16() { - let items: Vec> = vec![None, Some(2u16)]; - - round_trip(items); - } - - #[test] - fn u8_array_4() { - let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; - - round_trip(items); - } - - #[test] - fn h256() { - let items: Vec = vec![H256::zero(), H256::from([1; 32]), H256::random()]; - - round_trip(items); - } - - #[test] - fn vec_of_h256() { - let items: Vec> = vec![ - vec![], - vec![H256::zero(), H256::from([1; 32]), H256::random()], - ]; - - round_trip(items); - } - - #[test] - fn option_vec_h256() { - let items: Vec>> = vec![ - None, - Some(vec![]), - Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), - ]; - - round_trip(items); - } - - #[test] - fn vec_u16() { - let items: Vec> = vec![ - vec![], - vec![255], - vec![0, 1, 2], - vec![100; 64], - vec![255, 0, 255], - ]; - - round_trip(items); - } - - #[test] - fn vec_of_vec_u16() { - let items: Vec>> = vec![ - vec![], - vec![vec![]], - vec![vec![1, 2, 3]], - vec![vec![], vec![]], - vec![vec![], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![], vec![1, 2, 3]], - vec![vec![], vec![], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct FixedLen { - a: u16, - b: u64, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn fixed_len_struct_encoding() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - let expected_encodings = vec![ - // | u16--| u64----------------------------| u32----------| - vec![00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 01, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - vec![01, 00, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn fixed_len_excess_bytes() { - let fixed = FixedLen { a: 1, b: 2, c: 3 }; - - let mut bytes = fixed.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - assert_eq!( - FixedLen::from_ssz_bytes(&bytes), - Err(DecodeError::InvalidByteLength { - len: 15, - expected: 14, - }) - ); - } - - #[test] - fn vec_of_fixed_len_struct() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct VariableLen { - a: u16, - b: Vec, - c: u32, - } - - #[test] - 
#[allow(clippy::zero_prefixed_literal)] - fn offset_into_fixed_bytes() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 09, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetIntoFixedPortion(9)) - ); - } - - #[test] - fn variable_len_excess_bytes() { - let variable = VariableLen { - a: 1, - b: vec![2], - c: 3, - }; - - let mut bytes = variable.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - // The error message triggered is not so helpful, it's caught by a side-effect. Just - // checking there is _some_ error is fine. - assert!(VariableLen::from_ssz_bytes(&bytes).is_err()); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn first_offset_skips_byte() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 11, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetSkipsVariableBytes(11)) - ); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn variable_len_struct_encoding() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 1, - b: vec![0], - c: 1, - }, - VariableLen { - a: 1, - b: vec![0, 1, 2], - c: 1, - }, - ]; - - let expected_encodings = vec![ - // 00..................................09 - // | u16--| vec offset-----| u32------------| vec payload --------| - vec![00, 00, 10, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00], - vec![ - 01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn vec_of_variable_len_struct() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 255, - b: vec![0, 1, 2, 3], - c: 99, - }, - VariableLen { - a: 255, - b: vec![0], - c: 99, - }, - VariableLen { - a: 50, - b: vec![0], - c: 0, - }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct ThreeVariableLen { - a: u16, - b: Vec, - c: Vec, - d: Vec, - } - - #[test] - fn three_variable_len() { - let vec: Vec = vec![ThreeVariableLen { - a: 42, - b: vec![0], - c: vec![1], - d: vec![2], - }]; - - round_trip(vec); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offsets_decreasing() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | offset | offset | variable - 01, 00, 14, 00, 00, 00, 15, 00, 00, 00, 14, 00, 00, 00, 00, 00, - ]; - - assert_eq!( - ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetsAreDecreasing(14)) - ); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } - - #[test] - fn btree_map_fixed() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), - ]; - round_trip(data); - } - - #[test] - fn btree_map_variable_value() { - let data = vec![ - 
-            BTreeMap::new(),
-            BTreeMap::from_iter(vec![
-                (
-                    0u64,
-                    ThreeVariableLen {
-                        a: 1,
-                        b: vec![3, 5, 7],
-                        c: vec![],
-                        d: vec![0, 0],
-                    },
-                ),
-                (
-                    1,
-                    ThreeVariableLen {
-                        a: 99,
-                        b: vec![1],
-                        c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10],
-                        d: vec![4, 5, 6, 7, 8],
-                    },
-                ),
-                (
-                    2,
-                    ThreeVariableLen {
-                        a: 0,
-                        b: vec![],
-                        c: vec![],
-                        d: vec![],
-                    },
-                ),
-            ]),
-        ];
-        round_trip(data);
-    }
-}
diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml
deleted file mode 100644
index d3b2865a61d..00000000000
--- a/consensus/ssz_derive/Cargo.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[package]
-name = "eth2_ssz_derive"
-version = "0.3.1"
-authors = ["Paul Hauner <paul@paulhauner.com>"]
-edition = "2021"
-description = "Procedural derive macros to accompany the eth2_ssz crate."
-license = "Apache-2.0"
-
-[lib]
-name = "ssz_derive"
-proc-macro = true
-
-[dependencies]
-syn = "1.0.42"
-proc-macro2 = "1.0.23"
-quote = "1.0.7"
-darling = "0.13.0"
-
-[dev-dependencies]
-eth2_ssz = "0.4.1"
diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs
deleted file mode 100644
index 53752ba44b6..00000000000
--- a/consensus/ssz_derive/src/lib.rs
+++ /dev/null
@@ -1,981 +0,0 @@
-//! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate.
-//!
-//! ## Attributes
-//!
-//! The following struct/enum attributes are available:
-//!
-//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector.
-//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the
-//!   value whilst ignoring the outermost `enum`.
-//! - `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ
-//!   "container".
-//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one
-//!   non-skipped field as if the outermost `struct` does not exist.
-//!
-//! The following field attributes are available:
-//!
-//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and
-//!   `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type
-//!   (e.g. the type is defined in another crate).
-//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector.
-//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized
-//!   SSZ vector and it will be initialized from a `Default` implementation.
-//!
-//! ## Examples
-//!
-//! ### Structs
-//!
-//! ```rust
-//! use ssz::{Encode, Decode};
-//! use ssz_derive::{Encode, Decode};
-//!
-//! /// Represented as an SSZ "list" wrapped in an SSZ "container".
-//! #[derive(Debug, PartialEq, Encode, Decode)]
-//! #[ssz(struct_behaviour = "container")] // "container" is the default behaviour
-//! struct TypicalStruct {
-//!     foo: Vec<u8>
-//! }
-//!
-//! assert_eq!(
-//!     TypicalStruct { foo: vec![42] }.as_ssz_bytes(),
-//!     vec![4, 0, 0, 0, 42]
-//! );
-//!
-//! assert_eq!(
-//!     TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(),
-//!     TypicalStruct { foo: vec![42] },
-//! );
-//!
-//! /// Represented as an SSZ "list" *without* an SSZ "container".
-//! #[derive(Encode, Decode)]
-//! #[ssz(struct_behaviour = "transparent")]
-//! struct WrapperStruct {
-//!     foo: Vec<u8>
-//! }
-//!
-//! assert_eq!(
-//!     WrapperStruct { foo: vec![42] }.as_ssz_bytes(),
-//!     vec![42]
-//! );
-//!
-//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored.
#[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct WrapperStructSkippedField { -//! foo: Vec, -//! #[ssz(skip_serializing, skip_deserializing)] -//! bar: u8, -//! } -//! -//! assert_eq!( -//! WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! WrapperStructSkippedField { foo: vec![42], bar: 0 } -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". -//! #[derive(Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewType(Vec); -//! -//! assert_eq!( -//! NewType(vec![42]).as_ssz_bytes(), -//! vec![42] -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); -//! -//! assert_eq!( -//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! NewTypeSkippedField(vec![42], 0) -//! ); -//! ``` -//! -//! ### Enums -//! -//! ```rust -//! use ssz::{Encode, Decode}; -//! use ssz_derive::{Encode, Decode}; -//! -//! /// Represented as an SSZ "union". -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(enum_behaviour = "union")] -//! enum UnionEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! UnionEnum::Foo(42).as_ssz_bytes(), -//! vec![0, 42] -//! ); -//! assert_eq!( -//! UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), -//! UnionEnum::Bar(vec![42, 42]), -//! ); -//! -//! /// Represented as only the value in the enum variant. -//! #[derive(Debug, PartialEq, Encode)] -//! #[ssz(enum_behaviour = "transparent")] -//! enum TransparentEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! TransparentEnum::Foo(42).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), -//! vec![42, 42] -//! ); -//! ``` - -use darling::{FromDeriveInput, FromMeta}; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ - a \"transparent\" or \"union\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(ssz))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, - #[darling(default)] - struct_behaviour: Option, -} - -/// Field-level configuration. 
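`MAX_UNION_SELECTOR` above reflects the SSZ union framing: one selector byte in `0..=127` (higher values are reserved), followed by the selected variant's encoding. A minimal sketch of that framing, ahead of the field-level options defined just below (illustrative, standard library only; `encode_union` and `split_union` are hypothetical names):

const MAX_UNION_SELECTOR: u8 = 127;

/// Prepend the selector byte to an already-encoded variant body.
fn encode_union(selector: u8, body: &[u8]) -> Vec<u8> {
    assert!(selector <= MAX_UNION_SELECTOR, "selectors above 127 are reserved");
    let mut out = Vec::with_capacity(1 + body.len());
    out.push(selector);
    out.extend_from_slice(body);
    out
}

/// Split incoming bytes back into (selector, body), as a union decoder would.
fn split_union(bytes: &[u8]) -> Option<(u8, &[u8])> {
    let (selector, body) = bytes.split_first()?;
    (*selector <= MAX_UNION_SELECTOR).then(|| (*selector, body))
}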
-#[derive(Debug, Default, FromMeta)]
-struct FieldOpts {
-    #[darling(default)]
-    with: Option<Ident>,
-    #[darling(default)]
-    skip_serializing: bool,
-    #[darling(default)]
-    skip_deserializing: bool,
-}
-
-enum Procedure<'a> {
-    Struct {
-        data: &'a syn::DataStruct,
-        behaviour: StructBehaviour,
-    },
-    Enum {
-        data: &'a syn::DataEnum,
-        behaviour: EnumBehaviour,
-    },
-}
-
-enum StructBehaviour {
-    Container,
-    Transparent,
-}
-
-enum EnumBehaviour {
-    Union,
-    Transparent,
-}
-
-impl<'a> Procedure<'a> {
-    fn read(item: &'a DeriveInput) -> Self {
-        let opts = StructOpts::from_derive_input(item).unwrap();
-
-        match &item.data {
-            syn::Data::Struct(data) => {
-                if opts.enum_behaviour.is_some() {
-                    panic!("cannot use \"enum_behaviour\" for a struct");
-                }
-
-                match opts.struct_behaviour.as_deref() {
-                    Some("container") | None => Procedure::Struct {
-                        data,
-                        behaviour: StructBehaviour::Container,
-                    },
-                    Some("transparent") => Procedure::Struct {
-                        data,
-                        behaviour: StructBehaviour::Transparent,
-                    },
-                    Some(other) => panic!(
-                        "{} is not a valid struct behaviour, use \"container\" or \"transparent\"",
-                        other
-                    ),
-                }
-            }
-            syn::Data::Enum(data) => {
-                if opts.struct_behaviour.is_some() {
-                    panic!("cannot use \"struct_behaviour\" for an enum");
-                }
-
-                match opts.enum_behaviour.as_deref() {
-                    Some("union") => Procedure::Enum {
-                        data,
-                        behaviour: EnumBehaviour::Union,
-                    },
-                    Some("transparent") => Procedure::Enum {
-                        data,
-                        behaviour: EnumBehaviour::Transparent,
-                    },
-                    Some(other) => panic!(
-                        "{} is not a valid enum behaviour, use \"union\" or \"transparent\"",
-                        other
-                    ),
-                    None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR),
-                }
-            }
-            _ => panic!("ssz_derive only supports structs and enums"),
-        }
-    }
-}
-
-fn parse_ssz_fields(
-    struct_data: &syn::DataStruct,
-) -> Vec<(&syn::Type, Option<&syn::Ident>, FieldOpts)> {
-    struct_data
-        .fields
-        .iter()
-        .map(|field| {
-            let ty = &field.ty;
-            let ident = field.ident.as_ref();
-
-            let field_opts_candidates = field
-                .attrs
-                .iter()
-                .filter(|attr| attr.path.get_ident().map_or(false, |ident| *ident == "ssz"))
-                .collect::<Vec<_>>();
-
-            if field_opts_candidates.len() > 1 {
-                panic!("more than one field-level \"ssz\" attribute provided")
-            }
-
-            let field_opts = field_opts_candidates
-                .first()
-                .map(|attr| {
-                    let meta = attr.parse_meta().unwrap();
-                    FieldOpts::from_meta(&meta).unwrap()
-                })
-                .unwrap_or_default();
-
-            (ty, ident, field_opts)
-        })
-        .collect()
-}
-
-/// Implements `ssz::Encode` for some `struct` or `enum`.
-#[proc_macro_derive(Encode, attributes(ssz))]
-pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
-    let item = parse_macro_input!(input as DeriveInput);
-    let procedure = Procedure::read(&item);
-
-    match procedure {
-        Procedure::Struct { data, behaviour } => match behaviour {
-            StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data),
-            StructBehaviour::Container => ssz_encode_derive_struct(&item, data),
-        },
-        Procedure::Enum { data, behaviour } => match behaviour {
-            EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data),
-            EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data),
-        },
-    }
-}
-
-/// Derive `ssz::Encode` for a struct.
-///
-/// Fields are encoded in the order they are defined.
-///
-/// ## Field attributes
-///
-/// - `#[ssz(skip_serializing)]`: the field will not be serialized.
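The `quote!`-generated impl produced by the function below can be hard to read through the interpolation; for a small all-fixed-size container it expands to roughly the following hand-written impl (a sketch against a hypothetical `Example` struct, with the generated checked arithmetic simplified to plain `+`):

use ssz::Encode;

struct Example {
    a: u16,
    b: u32,
}

// Roughly what `#[derive(Encode)]` produces when every field is fixed-size.
impl Encode for Example {
    fn is_ssz_fixed_len() -> bool {
        <u16 as Encode>::is_ssz_fixed_len() && <u32 as Encode>::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        <u16 as Encode>::ssz_fixed_len() + <u32 as Encode>::ssz_fixed_len()
    }

    fn ssz_bytes_len(&self) -> usize {
        <Self as Encode>::ssz_fixed_len()
    }

    fn ssz_append(&self, buf: &mut Vec<u8>) {
        // The offset passed to the encoder is the total size of the fixed part.
        let offset = <Self as Encode>::ssz_fixed_len();
        let mut encoder = ssz::SszEncoder::container(buf, offset);
        encoder.append(&self.a);
        encoder.append(&self.b);
        encoder.finalize();
    }
}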
-fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let field_is_ssz_fixed_len = &mut vec![]; - let field_fixed_len = &mut vec![]; - let field_ssz_bytes_len = &mut vec![]; - let field_encoder_append = &mut vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - if field_opts.skip_serializing { - continue; - } - - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - if let Some(module) = field_opts.with { - let module = quote! { #module::encode }; - field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { #module::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { #module::ssz_bytes_len(&self.#ident) }); - field_encoder_append.push(quote! { - encoder.append_parameterized( - #module::is_ssz_fixed_len(), - |buf| #module::ssz_append(&self.#ident, buf) - ) - }); - } else { - field_is_ssz_fixed_len.push(quote! { <#ty as ssz::Encode>::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { <#ty as ssz::Encode>::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { self.#ident.ssz_bytes_len() }); - field_encoder_append.push(quote! { encoder.append(&self.#ident) }); - } - } - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #field_is_ssz_fixed_len && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_fixed_len length overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len: usize = 0; - #( - if #field_is_ssz_fixed_len { - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_bytes_len length overflow"); - } else { - len = len - .checked_add(ssz::BYTES_PER_LENGTH_OFFSET) - .expect("encode ssz_bytes_len length overflow for offset"); - len = len - .checked_add(#field_ssz_bytes_len) - .expect("encode ssz_bytes_len length overflow for bytes"); - } - )* - - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let mut offset: usize = 0; - #( - offset = offset - .checked_add(#field_fixed_len) - .expect("encode ssz_append offset overflow"); - )* - - let mut encoder = ssz::SszEncoder::container(buf, offset); - - #( - #field_encoder_append; - )* - - encoder.finalize(); - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. -/// -/// The single field is encoded directly, making the outermost `struct` transparent. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_serializing)]`: the field will not be serialized. 
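The variable-size branch of the generated `ssz_bytes_len` above boils down to simple arithmetic: each fixed-size field contributes `ssz_fixed_len()` bytes, and each variable-size field contributes `ssz::BYTES_PER_LENGTH_OFFSET` (4) bytes plus its actual payload. A worked check against the `VariableLen` struct from the deleted tests (illustrative only):

fn main() {
    // VariableLen { a: 1u16, b: vec![0u16, 1, 2], c: 1u32 }
    let fixed_part = 2 + 4 + 4; // a + offset-for-b + c
    let payload = 3 * 2;        // three u16s in b, two little-endian bytes each
    assert_eq!(fixed_part + payload, 16); // matches the 16-byte test vector earlier
}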
-fn ssz_encode_derive_struct_transparent( - derive_input: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let (ty, ident, _field_opts) = ssz_fields - .iter() - .find(|(_, _, field_opts)| !field_opts.skip_deserializing) - .expect("\"transparent\" struct must have at least one non-skipped field"); - - let output = if let Some(field_name) = ident { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.#field_name.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.#field_name.ssz_append(buf) - } - } - } - } else { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } - } - } - }; - - output.into() -} - -/// Derive `ssz::Encode` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be serialized as -/// if the enum does not exist. Since an union variant "selector" is not serialized, it is not -/// possible to reliably decode an enum that is serialized transparently. -/// -/// ## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are variably sized from an SSZ-perspective (not fixed size). -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the variable-size requirement isn't met. -fn ssz_encode_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_assert = quote! { - !<#ty as ssz::Encode>::is_ssz_fixed_len() - }; - (pattern, type_assert) - }) - .unzip(); - - let output = quote! 
{ - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - assert!( - #( - #assert_exprs && - )* true, - "not all enum variants are variably-sized" - ); - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner.ssz_bytes_len(), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => inner.ssz_append(buf), - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - pattern - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => { - let union_selector: u8 = #union_selectors; - debug_assert!(union_selector <= ssz::MAX_UNION_SELECTOR); - buf.push(union_selector); - inner.ssz_append(buf) - }, - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for a struct or enum. -#[proc_macro_derive(Decode, attributes(ssz))] -pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let procedure = Procedure::read(&item); - - match procedure { - Procedure::Struct { data, behaviour } => match behaviour { - StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), - StructBehaviour::Container => ssz_decode_derive_struct(&item, data), - }, - Procedure::Enum { data, behaviour } => match behaviour { - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, data), - EnumBehaviour::Transparent => panic!( - "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" is valid.", - ENUM_TRANSPARENT, ENUM_UNION - ), - }, - } -} - -/// Implements `ssz::Decode` for some `struct`. -/// -/// Fields are decoded in the order they are defined. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). 
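Mirroring the encode side, the decoder generated below slices a fixed-size container at each field's `ssz_fixed_len()` boundary and routes variable-size containers through `ssz::SszDecoderBuilder`. A hand-written sketch of the all-fixed case, reusing the hypothetical `Example` struct from the encoding sketch earlier (illustrative, not the macro's literal output):

use ssz::{Decode, DecodeError};

impl Decode for Example {
    fn is_ssz_fixed_len() -> bool {
        true
    }

    fn ssz_fixed_len() -> usize {
        <u16 as Decode>::ssz_fixed_len() + <u32 as Decode>::ssz_fixed_len()
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
        if bytes.len() != <Self as Decode>::ssz_fixed_len() {
            return Err(DecodeError::InvalidByteLength {
                len: bytes.len(),
                expected: <Self as Decode>::ssz_fixed_len(),
            });
        }
        // Slice each field out of the fixed-size blob at its ssz_fixed_len boundary.
        let (a_bytes, b_bytes) = bytes.split_at(<u16 as Decode>::ssz_fixed_len());
        Ok(Example {
            a: u16::from_ssz_bytes(a_bytes)?,
            b: u32::from_ssz_bytes(b_bytes)?,
        })
    }
}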
-fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let mut register_types = vec![]; - let mut field_names = vec![]; - let mut fixed_decodes = vec![]; - let mut decodes = vec![]; - let mut is_fixed_lens = vec![]; - let mut fixed_lens = vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - field_names.push(quote! { - #ident - }); - - // Field should not be deserialized; use a `Default` impl to instantiate. - if field_opts.skip_deserializing { - decodes.push(quote! { - let #ident = <_>::default(); - }); - - fixed_decodes.push(quote! { - let #ident = <_>::default(); - }); - - continue; - } - - let is_ssz_fixed_len; - let ssz_fixed_len; - let from_ssz_bytes; - if let Some(module) = field_opts.with { - let module = quote! { #module::decode }; - - is_ssz_fixed_len = quote! { #module::is_ssz_fixed_len() }; - ssz_fixed_len = quote! { #module::ssz_fixed_len() }; - from_ssz_bytes = quote! { #module::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type_parameterized(#is_ssz_fixed_len, #ssz_fixed_len)?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next_with(|slice| #module::from_ssz_bytes(slice))?; - }); - } else { - is_ssz_fixed_len = quote! { <#ty as ssz::Decode>::is_ssz_fixed_len() }; - ssz_fixed_len = quote! { <#ty as ssz::Decode>::ssz_fixed_len() }; - from_ssz_bytes = quote! { <#ty as ssz::Decode>::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type::<#ty>()?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next()?; - }); - } - - fixed_decodes.push(quote! { - let #ident = { - start = end; - end = end - .checked_add(#ssz_fixed_len) - .ok_or_else(|| ssz::DecodeError::OutOfBoundsByte { - i: usize::max_value() - })?; - let slice = bytes.get(start..end) - .ok_or_else(|| ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: end - })?; - #from_ssz_bytes? - }; - }); - is_fixed_lens.push(is_ssz_fixed_len); - fixed_lens.push(ssz_fixed_len); - } - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #is_fixed_lens && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#fixed_lens) - .expect("decode ssz_fixed_len overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - if ::is_ssz_fixed_len() { - if bytes.len() != ::ssz_fixed_len() { - return Err(ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: ::ssz_fixed_len(), - }); - } - - let mut start: usize = 0; - let mut end = start; - - #( - #fixed_decodes - )* - - Ok(Self { - #( - #field_names, - )* - }) - } else { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - - #( - #register_types - )* - - let mut decoder = builder.build()?; - - #( - #decodes - )* - - - Ok(Self { - #( - #field_names, - )* - }) - } - } - } - }; - output.into() -} - -/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. -/// -/// The bytes will be decoded as if they are the inner field, without the outermost struct. 
The -/// outermost struct will then be applied artificially. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). -fn ssz_decode_derive_struct_transparent( - item: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let mut fields = vec![]; - let mut wrapped_type = None; - - for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { - if let Some(name) = ident { - if field_opts.skip_deserializing { - fields.push(quote! { - #name: <_>::default(), - }); - } else { - fields.push(quote! { - #name: <_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } else { - let index = syn::Index::from(i); - if field_opts.skip_deserializing { - fields.push(quote! { - #index:<_>::default(), - }); - } else { - fields.push(quote! { - #index:<_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } - } - - let ty = wrapped_type.unwrap(); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - Ok(Self { - #( - #fields - )* - - }) - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. -fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (constructors, var_types): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let constructor = quote! { - #name::#variant_name - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - (constructor, ty) - }) - .unzip(); - - let union_selectors = compute_union_selectors(constructors.len()); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - // Sanity check to ensure the definition here does not drift from the one defined in - // `ssz`. 
- debug_assert_eq!(#MAX_UNION_SELECTOR, ssz::MAX_UNION_SELECTOR); - - let (selector, body) = ssz::split_union_bytes(bytes)?; - - match selector.into() { - #( - #union_selectors => { - <#var_types as ssz::Decode>::from_ssz_bytes(body).map(#constructors) - }, - )* - other => Err(ssz::DecodeError::UnionSelectorInvalid(other)) - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs deleted file mode 100644 index 2eeb3a48db7..00000000000 --- a/consensus/ssz_derive/tests/tests.rs +++ /dev/null @@ -1,215 +0,0 @@ -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::marker::PhantomData; - -fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); -} - -fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoFixedUnion { - U8(u8), - U16(u16), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoFixedUnionStruct { - a: TwoFixedUnion, -} - -#[test] -fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableA { - a: u8, - b: Vec, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableB { - a: Vec, - b: u8, -} - -#[derive(PartialEq, Debug, Encode)] -#[ssz(enum_behaviour = "transparent")] -enum TwoVariableTrans { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode)] -struct TwoVariableTransStruct { - a: TwoVariableTrans, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVariableUnion { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoVariableUnionStruct { - a: TwoVariableUnion, -} - -#[test] -fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[test] -fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - 
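// How `union_a`'s expected bytes decompose:
//   [0]          the union selector for variant `A`
//   [1]          VariableA.a (u8)
//   [5, 0, 0, 0] little-endian offset to `b`'s payload: 1 (a) + 4 (offset) = 5
//   [2, 3]       VariableA.b payload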
- assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVecUnion { - A(Vec), - B(Vec), -} - -#[test] -fn two_vec_union() { - assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStruct { - inner: Vec, -} - -impl TransparentStruct { - fn new(inner: u8) -> Self { - Self { inner: vec![inner] } - } -} - -#[test] -fn transparent_struct() { - assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructSkippedField { - inner: Vec, - #[ssz(skip_serializing, skip_deserializing)] - skipped: PhantomData, -} - -impl TransparentStructSkippedField { - fn new(inner: u8) -> Self { - Self { - inner: vec![inner], - skipped: PhantomData, - } - } -} - -#[test] -fn transparent_struct_skipped_field() { - assert_encode_decode( - &TransparentStructSkippedField::new(42), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewType(Vec); - -#[test] -fn transparent_struct_newtype() { - assert_encode_decode( - &TransparentStructNewType(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewTypeSkippedField( - Vec, - #[ssz(skip_serializing, skip_deserializing)] PhantomData, -); - -impl TransparentStructNewTypeSkippedField { - fn new(inner: Vec) -> Self { - Self(inner, PhantomData) - } -} - -#[test] -fn transparent_struct_newtype_skipped_field() { - assert_encode_decode( - &TransparentStructNewTypeSkippedField::new(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml deleted file mode 100644 index 2baa8994fb8..00000000000 --- a/consensus/ssz_types/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "eth2_ssz_types" -version = "0.2.2" -authors = ["Paul Hauner "] -edition = "2021" -description = "Provides types with unique properties required for SSZ serialization and Merklization." 
-license = "Apache-2.0"
-
-[lib]
-name = "ssz_types"
-
-[dependencies]
-tree_hash = "0.4.1"
-serde = "1.0.116"
-serde_derive = "1.0.116"
-eth2_serde_utils = "0.1.1"
-eth2_ssz = "0.4.1"
-typenum = "1.12.0"
-arbitrary = { version = "1.0", features = ["derive"], optional = true }
-derivative = "2.1.1"
-smallvec = "1.8.0"
-
-[dev-dependencies]
-serde_json = "1.0.58"
-tree_hash_derive = "0.4.0"
diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs
deleted file mode 100644
index b7bde225786..00000000000
--- a/consensus/ssz_types/src/bitfield.rs
+++ /dev/null
@@ -1,1332 +0,0 @@
-use crate::tree_hash::bitfield_bytes_tree_hash_root;
-use crate::Error;
-use core::marker::PhantomData;
-use derivative::Derivative;
-use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor};
-use serde::de::{Deserialize, Deserializer};
-use serde::ser::{Serialize, Serializer};
-use smallvec::{smallvec, SmallVec, ToSmallVec};
-use ssz::{Decode, Encode};
-use tree_hash::Hash256;
-use typenum::Unsigned;
-
-/// Maximum number of bytes to store on the stack in a bitfield's `SmallVec`.
-///
-/// The default of 32 bytes is enough to take us through to ~500K validators, as the byte length of
-/// attestation bitfields is roughly `N // 32 slots // 64 committees // 8 bits`.
-pub const SMALLVEC_LEN: usize = 32;
-
-/// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`.
-pub trait BitfieldBehaviour: Clone {}
-
-/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`.
-///
-/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Variable<N> {
-    _phantom: PhantomData<N>,
-}
-
-/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`.
-///
-/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Fixed<N> {
-    _phantom: PhantomData<N>,
-}
-
-impl<N: Unsigned + Clone> BitfieldBehaviour for Variable<N> {}
-impl<N: Unsigned + Clone> BitfieldBehaviour for Fixed<N> {}
-
-/// A heap-allocated, ordered, variable-length collection of `bool` values, limited to `N` bits.
-pub type BitList<N> = Bitfield<Variable<N>>;
-
-/// A heap-allocated, ordered, fixed-length collection of `bool` values, with `N` bits.
-///
-/// See [Bitfield](struct.Bitfield.html) documentation.
-pub type BitVector<N> = Bitfield<Fixed<N>>;
-
-/// A heap-allocated, ordered, fixed-length, collection of `bool` values. Use of
-/// [`BitList`](type.BitList.html) or [`BitVector`](type.BitVector.html) type aliases is preferred
-/// over direct use of this struct.
-///
-/// The `T` type parameter is used to define length behaviour with the `Variable` or `Fixed` marker
-/// structs.
-///
-/// The length of the Bitfield is set at instantiation (i.e., runtime, not compile time). However,
-/// use with a `Variable<N>` sets a type-level (i.e., compile-time) maximum length and `Fixed<N>`
-/// provides a type-level fixed length.
-///
-/// ## Example
-///
-/// The example uses the following crate-level type aliases:
-///
-/// - `BitList<N>` is an alias for `Bitfield<Variable<N>>`
-/// - `BitVector<N>` is an alias for `Bitfield<Fixed<N>>`
-///
-/// ```
-/// use ssz_types::{BitVector, BitList, typenum};
-///
-/// // `BitList` has a type-level maximum length. The length of the list is specified at runtime
-/// // and it must be less than or equal to `N`. After instantiation, `BitList` cannot grow or
-/// // shrink.
-/// type BitList8 = BitList<typenum::U8>;
-///
-/// // Creating a `BitList` with a larger-than-`N` capacity returns an `Err`.
-/// assert!(BitList8::with_capacity(9).is_err()); -/// -/// let mut bitlist = BitList8::with_capacity(4).unwrap(); // `BitList` permits a capacity of less than the maximum. -/// assert!(bitlist.set(3, true).is_ok()); // Setting inside the instantiation capacity is permitted. -/// assert!(bitlist.set(5, true).is_err()); // Setting outside that capacity is not. -/// -/// // `BitVector` has a type-level fixed length. Unlike `BitList`, it cannot be instantiated with a custom length -/// // or grow/shrink. -/// type BitVector8 = BitVector; -/// -/// let mut bitvector = BitVector8::new(); -/// assert_eq!(bitvector.len(), 8); // `BitVector` length is fixed at the type-level. -/// assert!(bitvector.set(7, true).is_ok()); // Setting inside the capacity is permitted. -/// assert!(bitvector.set(9, true).is_err()); // Setting outside the capacity is not. -/// -/// ``` -/// -/// ## Note -/// -/// The internal representation of the bitfield is the same as that required by SSZ. The lowest -/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. -#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = ""))] -pub struct Bitfield { - bytes: SmallVec<[u8; SMALLVEC_LEN]>, - len: usize, - _phantom: PhantomData, -} - -impl Bitfield> { - /// Instantiate with capacity for `num_bits` boolean values. The length cannot be grown or - /// shrunk after instantiation. - /// - /// All bits are initialized to `false`. - /// - /// Returns `None` if `num_bits > N`. - pub fn with_capacity(num_bits: usize) -> Result { - if num_bits <= N::to_usize() { - Ok(Self { - bytes: smallvec![0; bytes_for_bit_len(num_bits)], - len: num_bits, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Equal to `N` regardless of the value supplied to `with_capacity`. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`, such that a leading `true` bit is - /// used to indicate the length of the bitfield. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitList, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitList8 = BitList; - /// - /// let b = BitList8::with_capacity(4).unwrap(); - /// - /// assert_eq!(b.into_bytes(), SmallVec::from_buf([0b0001_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - let len = self.len(); - let mut bytes = self.bytes; - - bytes.resize(bytes_for_bit_len(len + 1), 0); - - let mut bitfield: Bitfield> = Bitfield::from_raw_bytes(bytes, len + 1) - .unwrap_or_else(|_| { - unreachable!( - "Bitfield with {} bytes must have enough capacity for {} bits.", - bytes_for_bit_len(len + 1), - len + 1 - ) - }); - bitfield - .set(len, true) - .expect("len must be in bounds for bitfield."); - - bitfield.bytes - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - let bytes_len = bytes.len(); - let mut initial_bitfield: Bitfield> = { - let num_bits = bytes.len() * 8; - Bitfield::from_raw_bytes(bytes, num_bits)? 
- }; - - let len = initial_bitfield - .highest_set_bit() - .ok_or(Error::MissingLengthInformation)?; - - // The length bit should be in the last byte, or else it means we have too many bytes. - if len / 8 + 1 != bytes_len { - return Err(Error::InvalidByteCount { - given: bytes_len, - expected: len / 8 + 1, - }); - } - - if len <= Self::max_len() { - initial_bitfield - .set(len, false) - .expect("Bit has been confirmed to exist"); - - let mut bytes = initial_bitfield.into_raw_bytes(); - - bytes.truncate(bytes_for_bit_len(len)); - - Self::from_raw_bytes(bytes, len) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Compute the intersection of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the shorter of the two inputs. - pub fn intersection(&self, other: &Self) -> Self { - let min_len = std::cmp::min(self.len(), other.len()); - let mut result = Self::with_capacity(min_len).expect("min len always less than N"); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the longer of the two inputs. - pub fn union(&self, other: &Self) -> Self { - let max_len = std::cmp::max(self.len(), other.len()); - let mut result = Self::with_capacity(max_len).expect("max len always less than N"); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Bitfield> { - /// Instantiate a new `Bitfield` with a fixed-length of `N` bits. - /// - /// All bits are initialized to `false`. - pub fn new() -> Self { - Self { - bytes: smallvec![0; bytes_for_bit_len(Self::capacity())], - len: Self::capacity(), - _phantom: PhantomData, - } - } - - /// Returns `N`, the number of bits in `Self`. - pub fn capacity() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitVector, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitVector4 = BitVector; - /// - /// assert_eq!(BitVector4::new().into_bytes(), SmallVec::from_buf([0b0000_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.into_raw_bytes() - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - Self::from_raw_bytes(bytes, Self::capacity()) - } - - /// Compute the intersection of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn intersection(&self, other: &Self) -> Self { - let mut result = Self::new(); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. 
- for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn union(&self, other: &Self) -> Self { - let mut result = Self::new(); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Default for Bitfield> { - fn default() -> Self { - Self::new() - } -} - -impl Bitfield { - /// Sets the `i`'th bit to `value`. - /// - /// Returns `None` if `i` is out-of-bounds of `self`. - pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> { - let len = self.len; - - if i < len { - let byte = self - .bytes - .get_mut(i / 8) - .ok_or(Error::OutOfBounds { i, len })?; - - if value { - *byte |= 1 << (i % 8) - } else { - *byte &= !(1 << (i % 8)) - } - - Ok(()) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the value of the `i`'th bit. - /// - /// Returns `Error` if `i` is out-of-bounds of `self`. - pub fn get(&self, i: usize) -> Result { - if i < self.len { - let byte = self - .bytes - .get(i / 8) - .ok_or(Error::OutOfBounds { i, len: self.len })?; - - Ok(*byte & 1 << (i % 8) > 0) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the number of bits stored in `self`. - pub fn len(&self) -> usize { - self.len - } - - /// Returns `true` if `self.len() == 0`. - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the underlying bytes representation of the bitfield. - pub fn into_raw_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.bytes - } - - /// Returns a view into the underlying bytes representation of the bitfield. - pub fn as_slice(&self) -> &[u8] { - &self.bytes - } - - /// Instantiates from the given `bytes`, which are the same format as output from - /// `self.into_raw_bytes()`. - /// - /// Returns `None` if: - /// - /// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits. - /// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or - /// equal to `bit_len`. - fn from_raw_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>, bit_len: usize) -> Result { - if bit_len == 0 { - if bytes.len() == 1 && bytes[0] == 0 { - // A bitfield with `bit_len` 0 can only be represented by a single zero byte. - Ok(Self { - bytes, - len: 0, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } else if bytes.len() != bytes_for_bit_len(bit_len) { - // The number of bytes must be the minimum required to represent `bit_len`. - Err(Error::InvalidByteCount { - given: bytes.len(), - expected: bytes_for_bit_len(bit_len), - }) - } else { - // Ensure there are no bits higher than `bit_len` that are set to true. - let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8)); - - if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 { - Ok(Self { - bytes, - len: bit_len, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } - } - - /// Returns the `Some(i)` where `i` is the highest index with a set bit. Returns `None` if - /// there are no set bits. - pub fn highest_set_bit(&self) -> Option { - self.bytes - .iter() - .enumerate() - .rev() - .find(|(_, byte)| **byte > 0) - .map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize) - } - - /// Returns an iterator across bitfield `bool` values, starting at the lowest index. 
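The addressing used by `set` and `get` above places bit `i` in byte `i / 8` at position `i % 8`, least-significant bit first. A standalone sketch of the same arithmetic, ahead of the iterator accessor below (illustrative only; `get_bit` is a hypothetical helper):

/// Mirrors the bit addressing in `Bitfield::set`/`get`.
fn get_bit(bytes: &[u8], i: usize) -> bool {
    (bytes[i / 8] & (1 << (i % 8))) != 0
}

fn main() {
    // Per the struct docs, [0b0000_0001, 0b0000_0010] has bits 0 and 9 set.
    let bytes = [0b0000_0001u8, 0b0000_0010];
    assert!(get_bit(&bytes, 0));
    assert!(get_bit(&bytes, 9));
    assert!(!get_bit(&bytes, 1));
}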
- pub fn iter(&self) -> BitIter<'_, T> { - BitIter { - bitfield: self, - i: 0, - } - } - - /// Returns true if no bits are set. - pub fn is_zero(&self) -> bool { - self.bytes.iter().all(|byte| *byte == 0) - } - - /// Returns the number of bits that are set to `true`. - pub fn num_set_bits(&self) -> usize { - self.bytes - .iter() - .map(|byte| byte.count_ones() as usize) - .sum() - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference(&self, other: &Self) -> Self { - let mut result = self.clone(); - result.difference_inplace(other); - result - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference_inplace(&mut self, other: &Self) { - let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len()); - - for i in 0..min_byte_len { - self.bytes[i] &= !other.bytes[i]; - } - } - - /// Shift the bits to higher indices, filling the lower indices with zeroes. - /// - /// The amount to shift by, `n`, must be less than or equal to `self.len()`. - pub fn shift_up(&mut self, n: usize) -> Result<(), Error> { - if n <= self.len() { - // Shift the bits up (starting from the high indices to avoid overwriting) - for i in (n..self.len()).rev() { - self.set(i, self.get(i - n)?)?; - } - // Zero the low bits - for i in 0..n { - self.set(i, false).unwrap(); - } - Ok(()) - } else { - Err(Error::OutOfBounds { - i: n, - len: self.len(), - }) - } - } -} - -/// Returns the minimum required bytes to represent a given number of bits. -/// -/// `bit_len == 0` requires a single byte. -fn bytes_for_bit_len(bit_len: usize) -> usize { - std::cmp::max(1, (bit_len + 7) / 8) -} - -/// An iterator over the bits in a `Bitfield`. -pub struct BitIter<'a, T> { - bitfield: &'a Bitfield, - i: usize, -} - -impl<'a, T: BitfieldBehaviour> Iterator for BitIter<'a, T> { - type Item = bool; - - fn next(&mut self) -> Option { - let res = self.bitfield.get(self.i).ok()?; - self.i += 1; - Some(res) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - // We could likely do better than turning this into bytes and reading the length, however - // it is kept this way for simplicity. - self.clone().into_bytes().len() - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e)) - }) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_bytes_len(&self) -> usize { - self.as_slice().len() - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e)) - }) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. 
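For reference, `bytes_for_bit_len` above rounds up to whole bytes with a one-byte floor; a few worked cases (an illustrative copy of the deleted helper, not new behaviour):

fn bytes_for_bit_len(bit_len: usize) -> usize {
    std::cmp::max(1, (bit_len + 7) / 8)
}

fn main() {
    assert_eq!(bytes_for_bit_len(0), 1); // even an empty bitfield occupies one byte
    assert_eq!(bytes_for_bit_len(8), 1);
    assert_eq!(bytes_for_bit_len(9), 2);
    assert_eq!(bytes_for_bit_len(16), 2);
}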
- fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - // Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or - // present). - let root = bitfield_bytes_tree_hash_root::(self.as_slice()); - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - bitfield_bytes_tree_hash_root::(self.as_slice()) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod bitvector { - use super::*; - use crate::BitVector; - - pub type BitVector0 = BitVector; - pub type BitVector1 = BitVector; - pub type BitVector4 = BitVector; - pub type BitVector8 = BitVector; - pub type BitVector16 = BitVector; - pub type BitVector64 = BitVector; - - #[test] - fn ssz_encode() { - assert_eq!(BitVector0::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector1::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector4::new().as_ssz_bytes(), vec![0b0000_0000]); - 
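// Note the contrast with the `BitList` tests further down: `BitVector<N>` is
// fixed-size, so it encodes to exactly `bytes_for_bit_len(N)` bytes with no
// length-delimiting bit, which is why BitVector0 through BitVector8 all
// serialize to a single byte.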
assert_eq!(BitVector8::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!( - BitVector16::new().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000] - ); - - let mut b = BitVector8::new(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255]); - - let mut b = BitVector4::new(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111]); - } - - #[test] - fn ssz_decode() { - assert!(BitVector0::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0001]).is_err()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitVector1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0010]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0100]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err()); - - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok()); - assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err()); - } - - #[test] - fn intersection() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - - assert_eq!(a.len(), 16); - assert_eq!(b.len(), 16); - assert_eq!(c.len(), 16); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - } - - #[test] - fn union() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - } - - #[test] - fn ssz_round_trip() { - 
assert_round_trip(BitVector0::new()); - - let mut b = BitVector1::new(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn ssz_bytes_len() { - for i in 0..64 { - let mut bitfield = BitVector64::new(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - #[test] - fn excess_bits_nimbus() { - let bad = vec![0b0001_1111]; - - assert!(BitVector4::from_ssz_bytes(&bad).is_err()); - } - - // Ensure that stack size of a BitVector is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} - -#[cfg(test)] -#[allow(clippy::cognitive_complexity)] -mod bitlist { - use super::*; - use crate::BitList; - - pub type BitList0 = BitList; - pub type BitList1 = BitList; - pub type BitList8 = BitList; - pub type BitList16 = BitList; - pub type BitList1024 = BitList; - - #[test] - fn ssz_encode() { - assert_eq!( - BitList0::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(1).unwrap().as_ssz_bytes(), - vec![0b0000_0010], - ); - - assert_eq!( - BitList8::with_capacity(8).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0001], - ); - - assert_eq!( - BitList8::with_capacity(7).unwrap().as_ssz_bytes(), - vec![0b1000_0000] - ); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]); - - assert_eq!( - BitList16::with_capacity(16).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000, 0b0000_0001] - ); - } - - #[test] - fn ssz_decode() { - assert!(BitList0::from_ssz_bytes(&[]).is_err()); - assert!(BitList1::from_ssz_bytes(&[]).is_err()); - assert!(BitList8::from_ssz_bytes(&[]).is_err()); - assert!(BitList16::from_ssz_bytes(&[]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0000]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitList1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0100]).is_err()); - - assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok()); - 
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); - } - - #[test] - fn ssz_decode_extra_bytes() { - assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err()); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitList0::with_capacity(0).unwrap()); - - for i in 0..2 { - assert_round_trip(BitList1::with_capacity(i).unwrap()); - } - for i in 0..9 { - assert_round_trip(BitList8::with_capacity(i).unwrap()); - } - for i in 0..17 { - assert_round_trip(BitList16::with_capacity(i).unwrap()); - } - - let mut b = BitList1::with_capacity(1).unwrap(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - for i in 0..8 { - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - for i in 0..16 { - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn from_raw_bytes() { - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000], 0).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 1).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 2).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 3).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 4).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 5).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 6).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 7).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 8).is_ok()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 13).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 15).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 16).is_ok()); - - for i in 0..8 { - assert!(BitList1024::from_raw_bytes(smallvec![], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1111_1110], i).is_err()); - } - - 
assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 1).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 2).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 3).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 4).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 5).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 6).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 7).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 8).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 9).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 14).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 15).is_err()); - } - - fn test_set_unset(num_bits: usize) { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - - for i in 0..=num_bits { - if i < num_bits { - // Starts as false - assert_eq!(bitfield.get(i), Ok(false)); - // Can be set true. - assert!(bitfield.set(i, true).is_ok()); - assert_eq!(bitfield.get(i), Ok(true)); - // Can be set false - assert!(bitfield.set(i, false).is_ok()); - assert_eq!(bitfield.get(i), Ok(false)); - } else { - assert!(bitfield.get(i).is_err()); - assert!(bitfield.set(i, true).is_err()); - assert!(bitfield.get(i).is_err()); - } - } - } - - fn test_bytes_round_trip(num_bits: usize) { - for i in 0..num_bits { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - bitfield.set(i, true).unwrap(); - - let bytes = bitfield.clone().into_raw_bytes(); - assert_eq!(bitfield, Bitfield::from_raw_bytes(bytes, num_bits).unwrap()); - } - } - - #[test] - fn set_unset() { - for i in 0..8 * 5 { - test_set_unset(i) - } - } - - #[test] - fn bytes_round_trip() { - for i in 0..8 * 5 { - test_bytes_round_trip(i) - } - } - - /// Type-specialised `smallvec` macro for testing. - macro_rules! 
bytevec { - ($($x : expr),* $(,)*) => { - { - let __smallvec: SmallVec<[u8; SMALLVEC_LEN]> = smallvec!($($x),*); - __smallvec - } - }; - } - - #[test] - fn into_raw_bytes() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(0, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0001, 0b0000_0000] - ); - bitfield.set(1, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0011, 0b0000_0000] - ); - bitfield.set(2, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0111, 0b0000_0000] - ); - bitfield.set(3, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_1111, 0b0000_0000] - ); - bitfield.set(4, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0001_1111, 0b0000_0000] - ); - bitfield.set(5, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0011_1111, 0b0000_0000] - ); - bitfield.set(6, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0111_1111, 0b0000_0000] - ); - bitfield.set(7, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0000] - ); - bitfield.set(8, true).unwrap(); - assert_eq!( - bitfield.into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0001] - ); - } - - #[test] - fn highest_set_bit() { - assert_eq!( - BitList1024::with_capacity(16).unwrap().highest_set_bit(), - None - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0001, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(0) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0010, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(1) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_1000], 8) - .unwrap() - .highest_set_bit(), - Some(3) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(15) - ); - } - - #[test] - fn intersection() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); - - assert_eq!(a.len(), 13); - assert_eq!(b.len(), 8); - assert_eq!(c.len(), 8); - assert_eq!(d.len(), 23); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&d), a); - assert_eq!(d.intersection(&a), a); - } - - #[test] - fn union() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - 
assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&d), d); - assert_eq!(d.union(&a), d); - } - - #[test] - fn difference() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0000], 16).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b0011, 0b1000], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - assert!(a.difference(&a).is_zero()); - } - - #[test] - fn difference_diff_length() { - let a = BitList1024::from_raw_bytes(smallvec![0b0110, 0b1100, 0b0011], 24).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0100, 0b0011], 24).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b1001, 0b0001], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - } - - #[test] - fn shift_up() { - let mut a = BitList1024::from_raw_bytes(smallvec![0b1100_1111, 0b1101_0110], 16).unwrap(); - let mut b = BitList1024::from_raw_bytes(smallvec![0b1001_1110, 0b1010_1101], 16).unwrap(); - - a.shift_up(1).unwrap(); - assert_eq!(a, b); - a.shift_up(15).unwrap(); - assert!(a.is_zero()); - - b.shift_up(16).unwrap(); - assert!(b.is_zero()); - assert!(b.shift_up(17).is_err()); - } - - #[test] - fn num_set_bits() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - - assert_eq!(a.num_set_bits(), 3); - assert_eq!(b.num_set_bits(), 5); - } - - #[test] - fn iter() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(2, true).unwrap(); - bitfield.set(8, true).unwrap(); - - assert_eq!( - bitfield.iter().collect::>(), - vec![false, false, true, false, false, false, false, false, true] - ); - } - - #[test] - fn ssz_bytes_len() { - for i in 1..64 { - let mut bitfield = BitList1024::with_capacity(i).unwrap(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - // Ensure that the stack size of a BitList is manageable. 
- #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs deleted file mode 100644 index 9625f27f3ab..00000000000 --- a/consensus/ssz_types/src/fixed_vector.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `Vector` (distinct from a Rust `Vec`). -/// -/// An ordered, heap-allocated, fixed-length, homogeneous collection of `T`, with `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Note -/// -/// Whilst it is possible with this library, SSZ declares that a `FixedVector` with a length of `0` -/// is illegal. -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{FixedVector, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `FixedVector` from a `Vec` that has the expected length. -/// let exact: FixedVector<_, typenum::U4> = FixedVector::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `FixedVector` from a `Vec` that is too long and the `Vec` is truncated. -/// let short: FixedVector<_, typenum::U3> = FixedVector::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `FixedVector` from a `Vec` that is too short and the missing values are created -/// // using `std::default::Default`. -/// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct FixedVector { - vec: Vec, - _phantom: PhantomData, -} - -impl FixedVector { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err`. - pub fn new(vec: Vec) -> Result { - if vec.len() == Self::capacity() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::capacity(), - }) - } - } - - /// Create a new vector filled with clones of `elem`. - pub fn from_elem(elem: T) -> Self - where - T: Clone, - { - Self { - vec: vec![elem; N::to_usize()], - _phantom: PhantomData, - } - } - - /// Identical to `self.capacity`, returns the type-level constant length. - /// - /// Exists for compatibility with `Vec`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if the type-level constant length of `self` is zero. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level constant length. 
- pub fn capacity() -> usize { - N::to_usize() - } -} - -impl From> for FixedVector { - fn from(mut vec: Vec) -> Self { - vec.resize_with(Self::capacity(), Default::default); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(vector: FixedVector) -> Vec { - vector.vec - } -} - -impl Default for FixedVector { - fn default() -> Self { - Self { - vec: (0..N::to_usize()).map(|_| T::default()).collect(), - _phantom: PhantomData, - } - } -} - -impl> Index for FixedVector { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for FixedVector { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for FixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -// This implementation is required to use `get_mut` to access elements. -// -// It's safe because none of the methods on mutable slices allow changing the length -// of the backing vec. -impl DerefMut for FixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] - } -} - -impl tree_hash::TreeHash for FixedVector -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - vec_tree_hash_root::(&self.vec) - } -} - -impl ssz::Encode for FixedVector -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in &self.vec { - item.ssz_append(buf); - } - } else { - let mut encoder = - ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET); - - for item in &self.vec { - encoder.append(item); - } - - encoder.finalize(); - } - } -} - -impl ssz::Decode for FixedVector -where - T: ssz::Decode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let fixed_len = N::to_usize(); - - if bytes.is_empty() { - Err(ssz::DecodeError::InvalidByteLength { - len: 0, - expected: 1, - }) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items != fixed_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "FixedVector of {} items has {} items", - num_items, fixed_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) - .collect::, _>>() - .and_then(|vec| { - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - }) - } else { - let vec = ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))?; - Self::new(vec).map_err(|e| { - 
ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for FixedVector -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedVector = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = vec![]; - let fixed: FixedVector = FixedVector::from(vec); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedVector = FixedVector::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn ssz_encode() { - let vec: FixedVector = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn ssz_round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn ssz_round_trip_u16_len_8() { - ssz_round_trip::>(vec![42; 8].into()); - ssz_round_trip::>(vec![0; 8].into()); - } - - #[test] - fn tree_hash_u8() { - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 1]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 8]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![42; 16]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0)); - - let source: Vec = (0..16).collect(); - let fixed: FixedVector = FixedVector::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 
32], 0));
-
-        let fixed: FixedVector<A, U1> = FixedVector::from(vec![a]);
-        assert_eq!(
-            fixed.tree_hash_root(),
-            merkle_root(a.tree_hash_root().as_bytes(), 0)
-        );
-
-        let fixed: FixedVector<A, U8> = FixedVector::from(vec![a; 8]);
-        assert_eq!(
-            fixed.tree_hash_root(),
-            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 8), 0)
-        );
-
-        let fixed: FixedVector<A, U13> = FixedVector::from(vec![a; 13]);
-        assert_eq!(
-            fixed.tree_hash_root(),
-            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 13), 0)
-        );
-
-        let fixed: FixedVector<A, U16> = FixedVector::from(vec![a; 16]);
-        assert_eq!(
-            fixed.tree_hash_root(),
-            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 16), 0)
-        );
-    }
-}
diff --git a/consensus/ssz_types/src/lib.rs b/consensus/ssz_types/src/lib.rs
deleted file mode 100644
index 3e181da8cb3..00000000000
--- a/consensus/ssz_types/src/lib.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-//! Provides types with unique properties required for SSZ serialization and Merklization:
-//!
-//! - `FixedVector`: A heap-allocated list with a size that is fixed at compile time.
-//! - `VariableList`: A heap-allocated list that cannot grow past a type-level maximum length.
-//! - `BitList`: A heap-allocated bitfield with a type-level _maximum_ length.
-//! - `BitVector`: A heap-allocated bitfield with a type-level _fixed_ length.
-//!
-//! These structs are required as SSZ serialization and Merklization rely upon type-level lengths
-//! for padding and verification.
-//!
-//! Adheres to the Ethereum 2.0 [SSZ
-//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md)
-//! at v0.12.1.
-//!
-//! ## Example
-//! ```
-//! use ssz_types::*;
-//!
-//! pub struct Example {
-//!     bit_vector: BitVector<typenum::U8>,
-//!     bit_list: BitList<typenum::U8>,
-//!     variable_list: VariableList<u64, typenum::U8>,
-//!     fixed_vector: FixedVector<u64, typenum::U8>,
-//! }
-//!
-//! let mut example = Example {
-//!     bit_vector: Bitfield::new(),
-//!     bit_list: Bitfield::with_capacity(4).unwrap(),
-//!     variable_list: <_>::from(vec![0, 1]),
-//!     fixed_vector: <_>::from(vec![2, 3]),
-//! };
-//!
-//! assert_eq!(example.bit_vector.len(), 8);
-//! assert_eq!(example.bit_list.len(), 4);
-//! assert_eq!(&example.variable_list[..], &[0, 1]);
-//! assert_eq!(&example.fixed_vector[..], &[2, 3, 0, 0, 0, 0, 0, 0]);
-//!
-//! ```
-
-#[macro_use]
-mod bitfield;
-mod fixed_vector;
-pub mod serde_utils;
-mod tree_hash;
-mod variable_list;
-
-pub use bitfield::{BitList, BitVector, Bitfield};
-pub use fixed_vector::FixedVector;
-pub use typenum;
-pub use variable_list::VariableList;
-
-pub mod length {
-    pub use crate::bitfield::{Fixed, Variable};
-}
-
-/// Returned when an item encounters an error.
-#[derive(PartialEq, Debug, Clone)]
-pub enum Error {
-    OutOfBounds {
-        i: usize,
-        len: usize,
-    },
-    /// A `BitList` does not have a set bit, therefore its length is unknowable.
-    MissingLengthInformation,
-    /// A `BitList` has excess bits set to true.
-    ExcessBits,
-    /// A `BitList` has an invalid number of bytes for a given bit length.
-    InvalidByteCount {
-        given: usize,
-        expected: usize,
-    },
-}
diff --git a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs
deleted file mode 100644
index 86077891bcd..00000000000
--- a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use crate::FixedVector;
-use eth2_serde_utils::hex::{self, PrefixedHexVisitor};
-use serde::{Deserializer, Serializer};
-use typenum::Unsigned;
-
-pub fn serialize<S, U>(bytes: &FixedVector<u8, U>, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-    U: Unsigned,
-{
-    serializer.serialize_str(&hex::encode(&bytes[..]))
-}
-
-pub fn deserialize<'de, D, U>(deserializer: D) -> Result<FixedVector<u8, U>, D::Error>
-where
-    D: Deserializer<'de>,
-    U: Unsigned,
-{
-    let vec = deserializer.deserialize_string(PrefixedHexVisitor)?;
-    FixedVector::new(vec)
-        .map_err(|e| serde::de::Error::custom(format!("invalid fixed vector: {:?}", e)))
-}
diff --git a/consensus/ssz_types/src/serde_utils/hex_var_list.rs b/consensus/ssz_types/src/serde_utils/hex_var_list.rs
deleted file mode 100644
index e3a3a14e06c..00000000000
--- a/consensus/ssz_types/src/serde_utils/hex_var_list.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-//! Serialize `VariableList<u8, N>` as a 0x-prefixed hex string.
-use crate::VariableList;
-use eth2_serde_utils::hex::{self, PrefixedHexVisitor};
-use serde::{Deserializer, Serializer};
-use typenum::Unsigned;
-
-pub fn serialize<S, N>(bytes: &VariableList<u8, N>, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-    N: Unsigned,
-{
-    serializer.serialize_str(&hex::encode(&**bytes))
-}
-
-pub fn deserialize<'de, D, N>(deserializer: D) -> Result<VariableList<u8, N>, D::Error>
-where
-    D: Deserializer<'de>,
-    N: Unsigned,
-{
-    let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
-    VariableList::new(bytes)
-        .map_err(|e| serde::de::Error::custom(format!("invalid variable list: {:?}", e)))
-}
diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs
deleted file mode 100644
index e2fd8ddf320..00000000000
--- a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-//! Serialize `VariableList<VariableList<u8, M>, N>` as a list of 0x-prefixed hex strings.
-use crate::VariableList;
-use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer};
-use std::marker::PhantomData;
-use typenum::Unsigned;
-
-#[derive(Deserialize)]
-#[serde(transparent)]
-pub struct WrappedListOwned<N: Unsigned>(
-    #[serde(with = "crate::serde_utils::hex_var_list")] VariableList<u8, N>,
-);
-
-#[derive(Serialize)]
-#[serde(transparent)]
-pub struct WrappedListRef<'a, N: Unsigned>(
-    #[serde(with = "crate::serde_utils::hex_var_list")] &'a VariableList<u8, N>,
-);
-
-pub fn serialize<S, M, N>(
-    list: &VariableList<VariableList<u8, M>, N>,
-    serializer: S,
-) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-    M: Unsigned,
-    N: Unsigned,
-{
-    let mut seq = serializer.serialize_seq(Some(list.len()))?;
-    for bytes in list {
-        seq.serialize_element(&WrappedListRef(bytes))?;
-    }
-    seq.end()
-}
-
-#[derive(Default)]
-pub struct Visitor<M, N> {
-    _phantom_m: PhantomData<M>,
-    _phantom_n: PhantomData<N>,
-}
-
-impl<'a, M, N> serde::de::Visitor<'a> for Visitor<M, N>
-where
-    M: Unsigned,
-    N: Unsigned,
-{
-    type Value = VariableList<VariableList<u8, M>, N>;
-
-    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
-        write!(formatter, "a list of 0x-prefixed hex bytes")
-    }
-
-    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
-    where
-        A: serde::de::SeqAccess<'a>,
-    {
-        let mut list: VariableList<VariableList<u8, M>, N> = <_>::default();
-
-        while let Some(val) = seq.next_element::<WrappedListOwned<M>>()?
{ - list.push(val.0).map_err(|e| { - serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) - })?; - } - - Ok(list) - } -} - -pub fn deserialize<'de, D, M, N>( - deserializer: D, -) -> Result, N>, D::Error> -where - D: Deserializer<'de>, - M: Unsigned, - N: Unsigned, -{ - deserializer.deserialize_seq(Visitor::default()) -} diff --git a/consensus/ssz_types/src/serde_utils/mod.rs b/consensus/ssz_types/src/serde_utils/mod.rs deleted file mode 100644 index cd6d49cc856..00000000000 --- a/consensus/ssz_types/src/serde_utils/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod hex_fixed_vec; -pub mod hex_var_list; -pub mod list_of_hex_var_list; -pub mod quoted_u64_fixed_vec; -pub mod quoted_u64_var_list; diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs deleted file mode 100644 index 0eb265adc31..00000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! Formats `FixedVector` using quotes. -//! -//! E.g., `FixedVector::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If `N` does not equal the length deserialization will fail. - -use crate::serde_utils::quoted_u64_var_list::deserialize_max; -use crate::FixedVector; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntFixedVecVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntFixedVecVisitor -where - N: Unsigned, -{ - type Value = FixedVector; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let fix: FixedVector = FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("FixedVector: {:?}", e)))?; - Ok(fix) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntFixedVecVisitor { - _phantom: PhantomData, - }) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_fixed_vec")] - values: FixedVector, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] 
- fn empty_list_err() { - serde_json::from_str::(r#"{ "values": [] }"#).unwrap_err(); - } - - #[test] - fn short_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2] }"#).unwrap_err(); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs deleted file mode 100644 index 9e176b63593..00000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Formats `VariableList` using quotes. -//! -//! E.g., `VariableList::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If the length of the `Vec` is greater than `N`, deserialization fails. - -use crate::VariableList; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntVarListVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntVarListVisitor -where - N: Unsigned, -{ - type Value = VariableList; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let list: VariableList = VariableList::new(vec) - .map_err(|e| serde::de::Error::custom(format!("VariableList: {:?}", e)))?; - Ok(list) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntVarListVisitor { - _phantom: PhantomData, - }) -} - -/// Returns a `Vec` of no more than `max_items` length. -pub(crate) fn deserialize_max<'a, A>(mut seq: A, max_items: usize) -> Result, A::Error> -where - A: serde::de::SeqAccess<'a>, -{ - let mut vec = vec![]; - let mut counter = 0; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - counter += 1; - if counter > max_items { - return Err(serde::de::Error::custom(format!( - "Deserialization failed. 
Length cannot be greater than {}.", - max_items - ))); - } - - vec.push(val.int); - } - - Ok(vec) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_var_list")] - values: VariableList, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn short_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2]); - assert_eq!(obj.values, expected); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/tree_hash.rs b/consensus/ssz_types/src/tree_hash.rs deleted file mode 100644 index e08c1d62fb1..00000000000 --- a/consensus/ssz_types/src/tree_hash.rs +++ /dev/null @@ -1,58 +0,0 @@ -use tree_hash::{Hash256, MerkleHasher, TreeHash, TreeHashType, BYTES_PER_CHUNK}; -use typenum::Unsigned; - -/// A helper function providing common functionality between the `TreeHash` implementations for -/// `FixedVector` and `VariableList`. -pub fn vec_tree_hash_root(vec: &[T]) -> Hash256 -where - T: TreeHash, - N: Unsigned, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = MerkleHasher::with_leaves( - (N::to_usize() + T::tree_hash_packing_factor() - 1) / T::tree_hash_packing_factor(), - ); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(N::to_usize()); - - for item in vec { - hasher - .write(item.tree_hash_root().as_bytes()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -/// A helper function providing common functionality for finding the Merkle root of some bytes that -/// represent a bitfield. 
-pub fn bitfield_bytes_tree_hash_root<N: Unsigned>(bytes: &[u8]) -> Hash256 {
-    let byte_size = (N::to_usize() + 7) / 8;
-    let leaf_count = (byte_size + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK;
-
-    let mut hasher = MerkleHasher::with_leaves(leaf_count);
-
-    hasher
-        .write(bytes)
-        .expect("bitfield should not exceed tree hash leaf limit");
-
-    hasher
-        .finish()
-        .expect("bitfield tree hash buffer should not exceed leaf limit")
-}
diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs
deleted file mode 100644
index 3361f750908..00000000000
--- a/consensus/ssz_types/src/variable_list.rs
+++ /dev/null
@@ -1,477 +0,0 @@
-use crate::tree_hash::vec_tree_hash_root;
-use crate::Error;
-use derivative::Derivative;
-use serde_derive::{Deserialize, Serialize};
-use std::marker::PhantomData;
-use std::ops::{Deref, DerefMut, Index, IndexMut};
-use std::slice::SliceIndex;
-use tree_hash::Hash256;
-use typenum::Unsigned;
-
-pub use typenum;
-
-/// Emulates an SSZ `List`.
-///
-/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than
-/// `N` values.
-///
-/// This struct is backed by a Rust `Vec` but constrained such that its length may never exceed
-/// `N`.
-///
-/// The maximum length of this struct is fixed at the type-level using
-/// [typenum](https://crates.io/crates/typenum).
-///
-/// ## Example
-///
-/// ```
-/// use ssz_types::{VariableList, typenum};
-///
-/// let base: Vec<u64> = vec![1, 2, 3, 4];
-///
-/// // Create a `VariableList` from a `Vec` that has the expected length.
-/// let exact: VariableList<_, typenum::U4> = VariableList::from(base.clone());
-/// assert_eq!(&exact[..], &[1, 2, 3, 4]);
-///
-/// // Create a `VariableList` from a `Vec` that is too long and the `Vec` is truncated.
-/// let short: VariableList<_, typenum::U3> = VariableList::from(base.clone());
-/// assert_eq!(&short[..], &[1, 2, 3]);
-///
-/// // Create a `VariableList` from a `Vec` that is shorter than the maximum.
-/// let mut long: VariableList<_, typenum::U5> = VariableList::from(base);
-/// assert_eq!(&long[..], &[1, 2, 3, 4]);
-///
-/// // Push a value if it does not exceed the maximum.
-/// long.push(5).unwrap();
-/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]);
-///
-/// // Pushing a value that _does_ exceed the maximum returns an error.
-/// assert!(long.push(6).is_err());
-/// ```
-#[derive(Debug, Clone, Serialize, Deserialize, Derivative)]
-#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))]
-#[serde(transparent)]
-pub struct VariableList<T, N> {
-    vec: Vec<T>,
-    _phantom: PhantomData<N>,
-}
-
-impl<T, N: Unsigned> VariableList<T, N> {
-    /// Returns `Ok` if the length of the given `vec` does not exceed the maximum length of
-    /// `Self`. Otherwise returns `Err`.
-    pub fn new(vec: Vec<T>) -> Result<Self, Error> {
-        if vec.len() <= N::to_usize() {
-            Ok(Self {
-                vec,
-                _phantom: PhantomData,
-            })
-        } else {
-            Err(Error::OutOfBounds {
-                i: vec.len(),
-                len: Self::max_len(),
-            })
-        }
-    }
-
-    /// Create an empty list.
-    pub fn empty() -> Self {
-        Self {
-            vec: vec![],
-            _phantom: PhantomData,
-        }
-    }
-
-    /// Returns the number of values presently in `self`.
-    pub fn len(&self) -> usize {
-        self.vec.len()
-    }
-
-    /// True if `self` does not contain any values.
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns the type-level maximum length.
-    pub fn max_len() -> usize {
-        N::to_usize()
-    }
-
-    /// Appends `value` to the back of `self`.
-    ///
-    /// Returns `Err(Error::OutOfBounds)` when appending `value` would exceed the maximum length.
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < Self::max_len() { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len() + 1, - len: Self::max_len(), - }) - } - } -} - -impl From> for VariableList { - fn from(mut vec: Vec) -> Self { - vec.truncate(N::to_usize()); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(list: VariableList) -> Vec { - list.vec - } -} - -impl Default for VariableList { - fn default() -> Self { - Self { - vec: Vec::default(), - _phantom: PhantomData, - } - } -} - -impl> Index for VariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for VariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for VariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl DerefMut for VariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] - } -} - -impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for VariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl tree_hash::TreeHash for VariableList -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = vec_tree_hash_root::(&self.vec); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl ssz::Encode for VariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } -} - -impl ssz::Decode for VariableList -where - T: ssz::Decode, - N: Unsigned, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let max_len = N::to_usize(); - - if bytes.is_empty() { - Ok(vec![].into()) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "VariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { - vec.push(T::from_ssz_bytes(chunk)?); - Ok(vec) - }) - .map(Into::into) - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) - .map(|vec: Vec<_>| vec.into()) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for VariableList -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec: 
Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: VariableList = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: VariableList = VariableList::from(vec); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: VariableList = VariableList::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: VariableList = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); - } - - fn root_with_length(bytes: &[u8], len: usize) -> Hash256 { - let root = merkle_root(bytes, 0); - tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_u8() { - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&[0; 8], 0)); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - let source: Vec = (0..16).collect(); - let fixed: VariableList = VariableList::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), root_with_length(&source, 16)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - fn padded_root_with_length(bytes: &[u8], len: usize, min_nodes: usize) -> Hash256 { - let root = merkle_root(bytes, min_nodes); - tree_hash::mix_in_length(&root, len) - 
} - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&[0; 32], 0, 0), - ); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 1), - "U1 {}", - i - ); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 8), - "U8 {}", - i - ); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 13), - "U13 {}", - i - ); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 16), - "U16 {}", - i - ); - } - } -} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index ccb41830be8..c16742782c6 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -13,15 +13,15 @@ tokio = { version = "1.14.0", features = ["rt-multi-thread"] } bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +ssz_types = "0.5.0" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" types = { path = "../types", default-features = false } rayon = "1.4.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" int_to_bytes = { path = "../int_to_bytes" } smallvec = "1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } @@ -39,7 +39,7 @@ arbitrary-fuzz = [ "types/arbitrary-fuzz", "bls/arbitrary", "merkle_proof/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", + "ethereum_ssz/arbitrary", + "ssz_types/arbitrary", "tree_hash/arbitrary", ] diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index aaad96fbd53..2e86556b0fb 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -1,4 +1,4 @@ -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index e4f36bedd8c..9641e8f96ec 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -41,4 +41,4 @@ pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, }; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; -pub use verify_operation::{SigVerifiedOp, VerifyOperation}; +pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 7d04cad90b7..4bee596615a 100644 --- 
a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -282,7 +282,8 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + verify_exit(state, None, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b7d28832db0..6eabbf0d44a 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -978,8 +978,14 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against phase0 state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against phase0 state"); /* * Ensure the exit verifies after Altair. @@ -992,8 +998,14 @@ async fn fork_spanning_exit() { let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against altair state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against altair state"); /* * Ensure the exit no longer verifies after Bellatrix. 
@@ -1009,6 +1021,12 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect_err("phase0 exit does not verify against bellatrix state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect_err("phase0 exit does not verify against bellatrix state"); } diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index bb26799250d..731a82aa951 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use crate::VerifySignatures; -use eth2_hashing::hash; +use ethereum_hashing::hash; use types::*; type Result = std::result::Result>; diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index f17e5fcd230..9e9282912de 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -20,10 +20,12 @@ fn error(reason: ExitInvalid) -> BlockOperationError { /// Spec v0.12.1 pub fn verify_exit( state: &BeaconState, + current_epoch: Option, signed_exit: &SignedVoluntaryExit, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { + let current_epoch = current_epoch.unwrap_or(state.current_epoch()); let exit = &signed_exit.message; let validator = state @@ -33,7 +35,7 @@ pub fn verify_exit( // Verify the validator is active. verify!( - validator.is_active_at(state.current_epoch()), + validator.is_active_at(current_epoch), ExitInvalid::NotActive(exit.validator_index) ); @@ -45,9 +47,9 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. 
verify!( - state.current_epoch() >= exit.epoch, + current_epoch >= exit.epoch, ExitInvalid::FutureEpoch { - state: state.current_epoch(), + state: current_epoch, exit: exit.epoch } ); @@ -57,9 +59,9 @@ .activation_epoch .safe_add(spec.shard_committee_period)?; verify!( - state.current_epoch() >= earliest_exit_epoch, + current_epoch >= earliest_exit_epoch, ExitInvalid::TooYoungToExit { - current_epoch: state.current_epoch(), + current_epoch, earliest_exit_epoch, } ); diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 50ac2ff3de5..864844080fb 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -134,7 +134,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit { state: &BeaconState<E>, spec: &ChainSpec, ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { - verify_exit(state, &self, VerifySignatures::True, spec)?; + verify_exit(state, None, &self, VerifySignatures::True, spec)?; Ok(SigVerifiedOp::new(self, state)) } @@ -205,3 +205,35 @@ impl<E: EthSpec> VerifyOperation<E> for SignedBlsToExecutionChange { smallvec![] } } + +/// Trait for operations that can be verified and transformed into a +/// `SigVerifiedOp`. +/// +/// The `At` suffix indicates that we can specify a particular epoch at which to +/// verify the operation. +pub trait VerifyOperationAt<E: EthSpec>: VerifyOperation<E> + Sized { + fn validate_at( + self, + state: &BeaconState<E>, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result<SigVerifiedOp<Self, E>, Self::Error>; +} + +impl<E: EthSpec> VerifyOperationAt<E> for SignedVoluntaryExit { + fn validate_at( + self, + state: &BeaconState<E>, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { + verify_exit( + state, + Some(validate_at_epoch), + &self, + VerifySignatures::True, + spec, + )?; + Ok(SigVerifiedOp::new(self, state)) + } +} diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 9a7d58b77d9..303e5cfba17 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.3.3" [dependencies] -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" [features] diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index f43edfe8644..e71f3ca18e7 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::{Context, Sha256Context}; +use ethereum_hashing::{Context, Sha256Context}; use std::cmp::max; /// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy.
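A minimal sketch of how a caller might drive the new optional-epoch parameter, assuming Lighthouse's `state_processing` and `types` crates are in scope; the helper name and exact import paths below are illustrative, not part of the diff:

```rust
// Sketch only: `exit_valid_at` is a hypothetical helper; `verify_exit`'s
// signature matches the diff above, but the module paths are assumed.
use state_processing::per_block_processing::{verify_exit::verify_exit, VerifySignatures};
use types::{BeaconState, ChainSpec, Epoch, EthSpec, SignedVoluntaryExit};

fn exit_valid_at<E: EthSpec>(
    state: &BeaconState<E>,
    exit: &SignedVoluntaryExit,
    at_epoch: Option<Epoch>,
    spec: &ChainSpec,
) -> bool {
    // `None` preserves the old behaviour (verify against `state.current_epoch()`);
    // `Some(epoch)` verifies the exit as if the chain had advanced to `epoch`.
    verify_exit(state, at_epoch, exit, VerifySignatures::True, spec).is_ok()
}
```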
diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index edc6dd6377c..2b9a2565547 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use std::mem; const SEED_SIZE: usize = 32; diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml deleted file mode 100644 index b2630d4bf60..00000000000 --- a/consensus/tree_hash/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "tree_hash" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Efficient Merkle-hashing as used in Ethereum 2.0" - -[dev-dependencies] -rand = "0.8.5" -tree_hash_derive = "0.4.0" -types = { path = "../types" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" -smallvec = "1.6.1" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs deleted file mode 100644 index e5b505bb91c..00000000000 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ /dev/null @@ -1,50 +0,0 @@ -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use types::{BeaconState, EthSpec, MainnetEthSpec}; - -const TREE_HASH_LOOPS: usize = 1_000; -const VALIDATOR_COUNT: usize = 1_000; - -fn get_harness<T: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<T>> { - let harness = BeaconChainHarness::builder(T::default()) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness -} - -fn build_state<T: EthSpec>() -> BeaconState<T> { - let state = get_harness::<T>().chain.head_beacon_state_cloned(); - - assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); - assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); - assert!(state - .as_base() - .unwrap() - .previous_epoch_attestations - .is_empty()); - assert!(state - .as_base() - .unwrap() - .current_epoch_attestations - .is_empty()); - assert!(state.as_base().unwrap().eth1_data_votes.is_empty()); - assert!(state.as_base().unwrap().historical_roots.is_empty()); - - state -} - -fn main() { - let state = build_state::<MainnetEthSpec>(); - - // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. - let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); - - for _ in 0..TREE_HASH_LOOPS { - let root = state.canonical_root(); - vec.push(root[0]); - } -} diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs deleted file mode 100644 index 899356f8331..00000000000 --- a/consensus/tree_hash/src/impls.rs +++ /dev/null @@ -1,222 +0,0 @@ -use super::*; -use ethereum_types::{H160, H256, U128, U256}; - -fn int_to_hash256(int: u64) -> Hash256 { - let mut bytes = [0; HASHSIZE]; - bytes[0..8].copy_from_slice(&int.to_le_bytes()); - Hash256::from_slice(&bytes) -} - -macro_rules!
impl_for_bitsize { - ($type: ident, $bit_size: expr) => { - impl TreeHash for $type { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(&self.to_le_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - HASHSIZE / ($bit_size / 8) - } - - #[allow(clippy::cast_lossless)] // Lint does not apply to all uses of this macro. - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } - } - }; -} - -impl_for_bitsize!(u8, 8); -impl_for_bitsize!(u16, 16); -impl_for_bitsize!(u32, 32); -impl_for_bitsize!(u64, 64); -impl_for_bitsize!(usize, 64); - -impl TreeHash for bool { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - (*self as u8).tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - u8::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } -} - -/// Only valid for byte types less than 32 bytes. -macro_rules! impl_for_lt_32byte_u8_array { - ($len: expr) => { - impl TreeHash for [u8; $len] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..$len].copy_from_slice(&self[..]); - Hash256::from_slice(&result) - } - } - }; -} - -impl_for_lt_32byte_u8_array!(4); -impl_for_lt_32byte_u8_array!(32); - -impl TreeHash for [u8; 48] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let values_per_chunk = BYTES_PER_CHUNK; - let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; - merkle_root(self, minimum_chunk_count) - } -} - -impl TreeHash for U128 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 16]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 2 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; HASHSIZE]; - self.to_little_endian(&mut result[0..16]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for U256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - self.to_little_endian(&mut result[..]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H160 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - 
result[0..20].copy_from_slice(self.as_bytes()); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(self.as_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - *self - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn bool() { - let mut true_bytes: Vec = vec![1]; - true_bytes.append(&mut vec![0; 31]); - - let false_bytes: Vec = vec![0; 32]; - - assert_eq!(true.tree_hash_root().as_bytes(), true_bytes.as_slice()); - assert_eq!(false.tree_hash_root().as_bytes(), false_bytes.as_slice()); - } - - #[test] - fn int_to_bytes() { - assert_eq!(int_to_hash256(0).as_bytes(), &[0; 32]); - assert_eq!( - int_to_hash256(1).as_bytes(), - &[ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0 - ] - ); - assert_eq!( - int_to_hash256(u64::max_value()).as_bytes(), - &[ - 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] - ); - } -} diff --git a/consensus/tree_hash/src/lib.rs b/consensus/tree_hash/src/lib.rs deleted file mode 100644 index ec40de91602..00000000000 --- a/consensus/tree_hash/src/lib.rs +++ /dev/null @@ -1,208 +0,0 @@ -pub mod impls; -mod merkle_hasher; -mod merkleize_padded; -mod merkleize_standard; - -pub use merkle_hasher::{Error, MerkleHasher}; -pub use merkleize_padded::merkleize_padded; -pub use merkleize_standard::merkleize_standard; - -use eth2_hashing::{hash_fixed, ZERO_HASHES, ZERO_HASHES_MAX_INDEX}; -use smallvec::SmallVec; - -pub const BYTES_PER_CHUNK: usize = 32; -pub const HASHSIZE: usize = 32; -pub const MERKLE_HASH_CHUNK: usize = 2 * BYTES_PER_CHUNK; -pub const MAX_UNION_SELECTOR: u8 = 127; -pub const SMALLVEC_SIZE: usize = 32; - -pub type Hash256 = ethereum_types::H256; -pub type PackedEncoding = SmallVec<[u8; SMALLVEC_SIZE]>; - -/// Convenience method for `MerkleHasher` which also provides some fast-paths for small trees. -/// -/// `minimum_leaf_count` will only be used if it is greater than or equal to the minimum number of leaves that can be created from `bytes`. -pub fn merkle_root(bytes: &[u8], minimum_leaf_count: usize) -> Hash256 { - let leaves = std::cmp::max( - (bytes.len() + (HASHSIZE - 1)) / HASHSIZE, - minimum_leaf_count, - ); - - if leaves == 0 { - // If there are no bytes then the hash is always zero. - Hash256::zero() - } else if leaves == 1 { - // If there is only one leaf, the hash is always those leaf bytes padded out to 32-bytes. - let mut hash = [0; HASHSIZE]; - hash[0..bytes.len()].copy_from_slice(bytes); - Hash256::from_slice(&hash) - } else if leaves == 2 { - // If there are only two leaves (this is common with BLS pubkeys), we can avoid some - // overhead with `MerkleHasher` and just do a simple 3-node tree here. - let mut leaves = [0; HASHSIZE * 2]; - leaves[0..bytes.len()].copy_from_slice(bytes); - - Hash256::from_slice(&hash_fixed(&leaves)) - } else { - // If there are 3 or more leaves, use `MerkleHasher`. - let mut hasher = MerkleHasher::with_leaves(leaves); - hasher - .write(bytes) - .expect("the number of leaves is adequate for the number of bytes"); - hasher - .finish() - .expect("the number of leaves is adequate for the number of bytes") - } -} - -/// Returns the node created by hashing `root` and `length`. -/// -/// Used in `TreeHash` for inserting the length of a list above it's root. 
-pub fn mix_in_length(root: &Hash256, length: usize) -> Hash256 { - let usize_len = std::mem::size_of::<usize>(); - - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes[0..usize_len].copy_from_slice(&length.to_le_bytes()); - - Hash256::from_slice(&eth2_hashing::hash32_concat(root.as_bytes(), &length_bytes)[..]) -} - -/// Returns `Some(root)` created by hashing `root` and `selector`, if `selector <= -/// MAX_UNION_SELECTOR`. Otherwise, returns `None`. -/// -/// Used in `TreeHash` for the "union" type. -/// -/// ## Specification -/// -/// ```ignore,text -/// mix_in_selector: Given a Merkle root root and a type selector selector ("uint256" little-endian -/// serialization) return hash(root + selector). -/// ``` -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -pub fn mix_in_selector(root: &Hash256, selector: u8) -> Option<Hash256> { - if selector > MAX_UNION_SELECTOR { - return None; - } - - let mut chunk = [0; BYTES_PER_CHUNK]; - chunk[0] = selector; - - let root = eth2_hashing::hash32_concat(root.as_bytes(), &chunk); - Some(Hash256::from_slice(&root)) -} - -/// Returns a cached padding node for a given height. -fn get_zero_hash(height: usize) -> &'static [u8] { - if height <= ZERO_HASHES_MAX_INDEX { - &ZERO_HASHES[height] - } else { - panic!("Tree exceeds MAX_TREE_DEPTH of {}", ZERO_HASHES_MAX_INDEX) - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum TreeHashType { - Basic, - Vector, - List, - Container, -} - -pub trait TreeHash { - fn tree_hash_type() -> TreeHashType; - - fn tree_hash_packed_encoding(&self) -> PackedEncoding; - - fn tree_hash_packing_factor() -> usize; - - fn tree_hash_root(&self) -> Hash256; -} - -/// Punch through references. -impl<'a, T> TreeHash for &'a T -where - T: TreeHash, -{ - fn tree_hash_type() -> TreeHashType { - T::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - T::tree_hash_packed_encoding(*self) - } - - fn tree_hash_packing_factor() -> usize { - T::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - T::tree_hash_root(*self) - } -} - -#[macro_export] -macro_rules! tree_hash_ssz_encoding_as_vector { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec<u8> { - tree_hash::merkle_root(&ssz::ssz_encode(self)) - } - } - }; -} - -#[macro_export] -macro_rules! tree_hash_ssz_encoding_as_list { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Vec<u8> { - ssz::ssz_encode(self).tree_hash_root() - } - } - }; -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn mix_length() { - let hash = { - let mut preimage = vec![42; BYTES_PER_CHUNK]; - preimage.append(&mut vec![42]); - preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]); - eth2_hashing::hash(&preimage) - }; - - assert_eq!( - mix_in_length(&Hash256::from_slice(&[42; BYTES_PER_CHUNK]), 42).as_bytes(), - &hash[..]
- ); - } -} diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs deleted file mode 100644 index 2acaf1c3b8f..00000000000 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::{get_zero_hash, Hash256, HASHSIZE}; -use eth2_hashing::{Context, Sha256Context, HASH_LEN}; -use smallvec::{smallvec, SmallVec}; -use std::mem; - -type SmallVec8<T> = SmallVec<[T; 8]>; - -#[derive(Clone, Debug, PartialEq)] -pub enum Error { - /// The maximum number of leaves defined by the initialization `depth` has been exceeded. - MaximumLeavesExceeded { max_leaves: usize }, -} - -/// Helper struct to store either a hash digest or a slice. -/// -/// Should be used as a left or right value for some node. -enum Preimage<'a> { - Digest([u8; HASH_LEN]), - Slice(&'a [u8]), -} - -impl<'a> Preimage<'a> { - /// Returns a 32-byte slice. - fn as_bytes(&self) -> &[u8] { - match self { - Preimage::Digest(digest) => digest.as_ref(), - Preimage::Slice(slice) => slice, - } - } -} - -/// A node that has had a left child supplied, but not a right child. -struct HalfNode { - /// The hasher context. - context: Context, - /// The tree id of the node. The root node has an id of `1` and ids increase moving down the - /// tree from left to right. - id: usize, -} - -impl HalfNode { - /// Create a new half-node from the given `left` value. - fn new(id: usize, left: Preimage) -> Self { - let mut context = Context::new(); - context.update(left.as_bytes()); - - Self { context, id } - } - - /// Complete the half-node by providing a `right` value. Returns a digest of the left and right - /// nodes. - fn finish(mut self, right: Preimage) -> [u8; HASH_LEN] { - self.context.update(right.as_bytes()); - self.context.finalize() - } -} - -/// Provides a Merkle-root hasher that allows for streaming bytes (i.e., providing any-length byte -/// slices without need to separate into leaves). Efficiently handles cases where not all leaves -/// have been provided by assuming all non-provided leaves are `[0; 32]` and pre-computing the -/// zero-value hashes at all depths of the tree. -/// -/// This algorithm aims to allocate as little memory as possible and it does this by "folding" up -/// the tree as each leaf is provided. Consider this step-by-step functional diagram of hashing a -/// tree with depth three: -/// -/// ## Functional Diagram -/// -/// Nodes that are `-` have not been defined and do not occupy memory. Nodes that are `L` are -/// leaves that are provided but are not stored. Nodes that have integers (`1`, `2`) are stored in -/// our struct. Finally, nodes that are `X` were stored, but are now removed. -/// -/// ### Start -/// -/// ```ignore -/// - -/// / \ -/// - - -/// / \ / \ -/// - - - - -/// ``` -/// -/// ### Provide first leaf -/// -/// ```ignore -/// - -/// / \ -/// 2 - -/// / \ / \ -/// L - - - -/// ``` -/// -/// ### Provide second leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X - -/// / \ / \ -/// L L - - -/// ``` -/// -/// ### Provide third leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X 3 -/// / \ / \ -/// L L L - -/// ``` -/// -/// ### Provide fourth and final leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X X -/// / \ / \ -/// L L L L -/// ``` -/// -pub struct MerkleHasher { - /// Stores the nodes that are half-complete and awaiting a right node. - /// - /// A smallvec of size 8 means we can hash a tree with 256 leaves without allocating on the - /// heap. Each half-node is 232 bytes, so this smallvec may store 1856 bytes on the stack.
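For orientation, here is a sketch of how this streaming hasher is typically driven, per the API shown in the removed code (assumes the now-externalised `tree_hash` crate; the helper name is illustrative):

```rust
use tree_hash::{Hash256, MerkleHasher};

// Sketch: hash a set of 32-byte leaves without materialising the whole tree.
fn root_of_chunks(chunks: &[[u8; 32]]) -> Hash256 {
    let mut hasher = MerkleHasher::with_leaves(chunks.len());
    for chunk in chunks {
        // `write` accepts slices of any length; leaves are assembled internally.
        hasher.write(chunk).expect("within declared leaf count");
    }
    // Leaves never provided are treated as [0; 32] via cached zero-hashes.
    hasher.finish().expect("buffer fits within declared leaf count")
}
```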
- half_nodes: SmallVec8<HalfNode>, - /// The depth of the tree that will be produced. - /// - /// Depth is counted top-down (i.e., the root node is at depth 0). A tree with 1 leaf has a - /// depth of 1, a tree with 4 leaves has a depth of 3. - depth: usize, - /// The next leaf that we are expecting to process. - next_leaf: usize, - /// A buffer of bytes that are waiting to be written to a leaf. - buffer: SmallVec<[u8; 32]>, - /// Set to Some(root) when the root of the tree is known. - root: Option<Hash256>, -} - -/// Returns the parent of node with id `i`. -fn get_parent(i: usize) -> usize { - i / 2 -} - -/// Gets the depth of a node with an id of `i`. -/// -/// It is a logic error to provide `i == 0`. -/// -/// E.g., if `i` is 1, depth is 0. If `i` is 2, depth is 1. -fn get_depth(i: usize) -> usize { - let total_bits = mem::size_of::<usize>() * 8; - total_bits - i.leading_zeros() as usize - 1 -} - -impl MerkleHasher { - /// Instantiate a hasher for a tree with a given number of leaves. - /// - /// `num_leaves` will be rounded to the next power of two. E.g., if `num_leaves == 6`, then the - /// tree will _actually_ be able to accommodate 8 leaves and the resulting hasher is exactly the - /// same as one that was instantiated with `Self::with_leaves(8)`. - /// - /// ## Notes - /// - /// If `num_leaves == 0`, a tree of depth 1 will be created. If no leaves are provided it will - /// return a root of `[0; 32]`. - pub fn with_leaves(num_leaves: usize) -> Self { - let depth = get_depth(num_leaves.next_power_of_two()) + 1; - Self::with_depth(depth) - } - - /// Instantiates a new, empty hasher for a tree with `depth` layers which will have capacity - /// for `1 << (depth - 1)` leaf nodes. - /// - /// It is not possible to grow the depth of the tree after instantiation. - /// - /// ## Panics - /// - /// Panics if `depth == 0`. - fn with_depth(depth: usize) -> Self { - assert!(depth > 0, "merkle tree cannot have a depth of zero"); - - Self { - half_nodes: SmallVec::with_capacity(depth - 1), - depth, - next_leaf: 1 << (depth - 1), - buffer: SmallVec::with_capacity(32), - root: None, - } - } - - /// Write some bytes to the hasher. - /// - /// ## Errors - /// - /// Returns an error if the given bytes would create a leaf that would exceed the maximum - /// permissible number of leaves defined by the initialization `depth`. E.g., a tree of `depth - /// == 2` can only accept 2 leaves. A tree of `depth == 14` can only accept 8,192 leaves. - pub fn write(&mut self, bytes: &[u8]) -> Result<(), Error> { - let mut ptr = 0; - while ptr <= bytes.len() { - let slice = &bytes[ptr..std::cmp::min(bytes.len(), ptr + HASHSIZE)]; - - if self.buffer.is_empty() && slice.len() == HASHSIZE { - self.process_leaf(slice)?; - ptr += HASHSIZE - } else if self.buffer.len() + slice.len() < HASHSIZE { - self.buffer.extend_from_slice(slice); - ptr += HASHSIZE - } else { - let buf_len = self.buffer.len(); - let required = HASHSIZE - buf_len; - - let mut leaf = [0; HASHSIZE]; - leaf[..buf_len].copy_from_slice(&self.buffer); - leaf[buf_len..].copy_from_slice(&slice[0..required]); - - self.process_leaf(&leaf)?; - self.buffer = smallvec![]; - - ptr += required - } - } - - Ok(()) - } - - /// Process the next leaf in the tree. - /// - /// ## Errors - /// - /// Returns an error if the given leaf would exceed the maximum permissible number of leaves - /// defined by the initialization `depth`. E.g., a tree of `depth == 2` can only accept 2 - /// leaves. A tree of `depth == 14` can only accept 8,192 leaves.
- fn process_leaf(&mut self, leaf: &[u8]) -> Result<(), Error> { - assert_eq!(leaf.len(), HASHSIZE, "a leaf must be 32 bytes"); - - let max_leaves = 1 << (self.depth + 1); - - if self.next_leaf > max_leaves { - return Err(Error::MaximumLeavesExceeded { max_leaves }); - } else if self.next_leaf == 1 { - // A tree of depth one has a root that is equal to the first given leaf. - self.root = Some(Hash256::from_slice(leaf)) - } else if self.next_leaf % 2 == 0 { - self.process_left_node(self.next_leaf, Preimage::Slice(leaf)) - } else { - self.process_right_node(self.next_leaf, Preimage::Slice(leaf)) - } - - self.next_leaf += 1; - - Ok(()) - } - - /// Returns the root of the Merkle tree. - /// - /// If not all leaves have been provided, the tree will be efficiently completed under the - /// assumption that all not-yet-provided leaves are equal to `[0; 32]`. - /// - /// ## Errors - /// - /// Returns an error if the bytes remaining in the buffer would create a leaf that would exceed - /// the maximum permissible number of leaves defined by the initialization `depth`. - pub fn finish(mut self) -> Result<Hash256, Error> { - if !self.buffer.is_empty() { - let mut leaf = [0; HASHSIZE]; - leaf[..self.buffer.len()].copy_from_slice(&self.buffer); - self.process_leaf(&leaf)? - } - - // If the tree is incomplete, we must complete it by providing zero-hashes. - loop { - if let Some(root) = self.root { - break Ok(root); - } else if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If there have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. - // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } - } - } - - /// Process a node that will become the left-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// In this scenario, the only option is to push a new half-node. - fn process_left_node(&mut self, id: usize, preimage: Preimage) { - self.half_nodes - .push(HalfNode::new(get_parent(id), preimage)) - } - - /// Process a node that will become the right-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// This operation will always complete one node, then it will attempt to crawl up the tree and - /// collapse all other completed nodes. For example, consider a tree of depth 3 (see diagram - /// below). When providing the node with id `7`, the node with id `3` will be completed which - /// will also provide the right-node for the `1` node. This function will complete both of - /// those nodes and ultimately find the root of the tree.
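The completion path in `finish` relies on the precomputed zero-hash ladder (`ZERO_HASHES[0]` is an all-zero leaf; each higher entry hashes two copies of the one below). A self-contained sketch of that precomputation, using the `sha2` crate as a stand-in for the crate's own SHA-256 context:

```rust
use sha2::{Digest, Sha256};

// Sketch: precompute the padding-node digests that `get_zero_hash` serves
// from a cache. Index 0 is an all-zero leaf; index h is the root of a
// depth-h subtree whose leaves are all zero.
fn zero_hashes(max_height: usize) -> Vec<[u8; 32]> {
    let mut hashes = vec![[0u8; 32]];
    for h in 1..=max_height {
        let mut ctx = Sha256::new();
        ctx.update(hashes[h - 1]);
        ctx.update(hashes[h - 1]);
        hashes.push(ctx.finalize().into());
    }
    hashes
}
```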
- /// - /// ```ignore - /// 1 <-- completed - /// / \ - /// 2 3 <-- completed - /// / \ / \ - /// 4 5 6 7 <-- supplied right node - /// ``` - fn process_right_node(&mut self, id: usize, mut preimage: Preimage) { - let mut parent = get_parent(id); - - loop { - match self.half_nodes.last() { - Some(node) if node.id == parent => { - preimage = Preimage::Digest( - self.half_nodes - .pop() - .expect("if .last() is Some then .pop() must succeed") - .finish(preimage), - ); - if parent == 1 { - self.root = Some(Hash256::from_slice(preimage.as_bytes())); - break; - } else { - parent = get_parent(parent); - } - } - _ => { - self.half_nodes.push(HalfNode::new(parent, preimage)); - break; - } - } - } - } - - /// Returns a "zero hash" from a pre-computed set for the given node. - /// - /// Note: this node is not always zero, instead it is the result of hashing up a tree where the - /// leaves are all zeros. E.g., in a tree of depth 2, the `zero_hash` of a node at depth 1 - /// will be `[0; 32]`. However, the `zero_hash` for a node at depth 0 will be - /// `hash(concat([0; 32], [0; 32])))`. - fn zero_hash(&self, id: usize) -> Preimage<'static> { - Preimage::Slice(get_zero_hash(self.depth - (get_depth(id) + 1))) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::merkleize_padded; - - /// This test is just to ensure that the stack size of the `Context` remains the same. We choose - /// our smallvec size based upon this, so it's good to know if it suddenly changes in size. - #[test] - fn context_size() { - assert_eq!( - mem::size_of::(), - 224, - "Halfnode size should be as expected" - ); - } - - fn compare_with_reference(leaves: &[Hash256], depth: usize) { - let reference_bytes = leaves - .iter() - .flat_map(|hash| hash.as_bytes()) - .copied() - .collect::>(); - - let reference_root = merkleize_padded(&reference_bytes, 1 << (depth - 1)); - - let merklizer_root_32_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_32_bytes, - "32 bytes should match reference root" - ); - - let merklizer_root_individual_3_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for bytes in reference_bytes.chunks(3) { - m.write(bytes).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_3_bytes, - "3 bytes should match reference root" - ); - - let merklizer_root_individual_single_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for byte in reference_bytes.iter() { - m.write(&[*byte]).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_single_bytes, - "single bytes should match reference root" - ); - } - - /// A simple wrapper to compare MerkleHasher to the reference function by just giving a number - /// of leaves and a depth. - fn compare_reference_with_len(leaves: u64, depth: usize) { - let leaves = (0..leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - compare_with_reference(&leaves, depth) - } - - /// Compares the `MerkleHasher::with_depth` and `MerkleHasher::with_leaves` generate consistent - /// results. 
- fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) { - let leaves = (0..num_leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - - let from_depth = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish() - }; - - let from_num_leaves = { - let mut m = MerkleHasher::with_leaves(num_leaves as usize); - for leaf in leaves.iter() { - m.process_leaf(leaf.as_bytes()) - .expect("should process leaf"); - } - m.finish() - }; - - assert_eq!( - from_depth, from_num_leaves, - "hash generated by depth should match that from num leaves" - ); - } - - #[test] - fn with_leaves() { - compare_new_with_leaf_count(1, 1); - compare_new_with_leaf_count(2, 2); - compare_new_with_leaf_count(3, 3); - compare_new_with_leaf_count(4, 3); - compare_new_with_leaf_count(5, 4); - compare_new_with_leaf_count(6, 4); - compare_new_with_leaf_count(7, 4); - compare_new_with_leaf_count(8, 4); - compare_new_with_leaf_count(9, 5); - compare_new_with_leaf_count(10, 5); - compare_new_with_leaf_count(11, 5); - compare_new_with_leaf_count(12, 5); - compare_new_with_leaf_count(13, 5); - compare_new_with_leaf_count(14, 5); - compare_new_with_leaf_count(15, 5); - } - - #[test] - fn depth() { - assert_eq!(get_depth(1), 0); - assert_eq!(get_depth(2), 1); - assert_eq!(get_depth(3), 1); - assert_eq!(get_depth(4), 2); - assert_eq!(get_depth(5), 2); - assert_eq!(get_depth(6), 2); - assert_eq!(get_depth(7), 2); - assert_eq!(get_depth(8), 3); - } - - #[test] - fn with_0_leaves() { - let hasher = MerkleHasher::with_leaves(0); - assert_eq!(hasher.finish().unwrap(), Hash256::zero()); - } - - #[test] - #[should_panic] - fn too_many_leaves() { - compare_reference_with_len(2, 1); - } - - #[test] - fn full_trees() { - compare_reference_with_len(1, 1); - compare_reference_with_len(2, 2); - compare_reference_with_len(4, 3); - compare_reference_with_len(8, 4); - compare_reference_with_len(16, 5); - compare_reference_with_len(32, 6); - compare_reference_with_len(64, 7); - compare_reference_with_len(128, 8); - compare_reference_with_len(256, 9); - compare_reference_with_len(256, 9); - compare_reference_with_len(8192, 14); - } - - #[test] - fn incomplete_trees() { - compare_reference_with_len(0, 1); - - compare_reference_with_len(0, 2); - compare_reference_with_len(1, 2); - - for i in 0..=4 { - compare_reference_with_len(i, 3); - } - - for i in 0..=7 { - compare_reference_with_len(i, 4); - } - - for i in 0..=15 { - compare_reference_with_len(i, 5); - } - - for i in 0..=32 { - compare_reference_with_len(i, 6); - } - - for i in 0..=64 { - compare_reference_with_len(i, 7); - } - - compare_reference_with_len(0, 14); - compare_reference_with_len(13, 14); - compare_reference_with_len(8191, 14); - } - - #[test] - fn remaining_buffer() { - let a = { - let mut m = MerkleHasher::with_leaves(2); - m.write(&[1]).expect("should write"); - m.finish().expect("should finish") - }; - - let b = { - let mut m = MerkleHasher::with_leaves(2); - let mut leaf = vec![1]; - leaf.extend_from_slice(&[0; 31]); - m.write(&leaf).expect("should write"); - m.write(&[0; 32]).expect("should write"); - m.finish().expect("should finish") - }; - - assert_eq!(a, b, "should complete buffer"); - } -} diff --git a/consensus/tree_hash/src/merkleize_padded.rs b/consensus/tree_hash/src/merkleize_padded.rs deleted file mode 100644 index f7dce399497..00000000000 --- a/consensus/tree_hash/src/merkleize_padded.rs +++ /dev/null @@ -1,330 +0,0 @@ -use super::{get_zero_hash, Hash256, 
BYTES_PER_CHUNK}; -use eth2_hashing::{hash32_concat, hash_fixed}; - -/// Merkleize `bytes` and return the root, optionally padding the tree out to `min_leaves` number of -/// leaves. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// First all nodes are extracted from `bytes` and then a padding node is added until the number of -/// leaf chunks is greater than or equal to `min_leaves`. Callers may set `min_leaves` to `0` if no -/// additional chunks should be added to the given `bytes`. -/// -/// If `bytes.len() <= BYTES_PER_CHUNK`, no hashing is done and `bytes` is returned, potentially -/// padded out to `BYTES_PER_CHUNK` length with `0`. -/// -/// ## CPU Performance -/// -/// A cache of `MAX_TREE_DEPTH` hashes is stored to avoid re-computing the hashes of padding nodes -/// (or their parents). Therefore, adding padding nodes only incurs one more hash per additional -/// height of the tree. -/// -/// ## Memory Performance -/// -/// This algorithm has three interesting memory usage properties: -/// -/// 1. The maximum memory footprint is roughly `O(V / 2)` memory, where `V` is the number of leaf -/// chunks with values (i.e., leaves that are not padding). This means adding padding nodes to -/// the tree does not increase the memory footprint. -/// 2. At each height of the tree half of the memory is freed until only a single chunk is stored. -/// 3. The input `bytes` are not copied into another list before processing. -/// -/// _Note: there are some minor memory overheads, including a handful of usizes and a list of -/// `MAX_TREE_DEPTH` hashes as `lazy_static` constants._ -pub fn merkleize_padded(bytes: &[u8], min_leaves: usize) -> Hash256 { - // If the bytes are just one chunk or less, pad to one chunk and return without hashing. - if bytes.len() <= BYTES_PER_CHUNK && min_leaves <= 1 { - let mut o = bytes.to_vec(); - o.resize(BYTES_PER_CHUNK, 0); - return Hash256::from_slice(&o); - } - - assert!( - bytes.len() > BYTES_PER_CHUNK || min_leaves > 1, - "Merkle hashing only needs to happen if there is more than one chunk" - ); - - // The number of leaves that can be made directly from `bytes`. - let leaves_with_values = (bytes.len() + (BYTES_PER_CHUNK - 1)) / BYTES_PER_CHUNK; - - // The number of parents that have at least one non-padding leaf. - // - // Since there is more than one node in this tree (see prior assertion), there should always be - // one or more initial parent nodes. - let initial_parents_with_values = std::cmp::max(1, next_even_number(leaves_with_values) / 2); - - // The number of leaves in the full tree (including padding nodes). - let num_leaves = std::cmp::max(leaves_with_values, min_leaves).next_power_of_two(); - - // The number of levels in the tree. - // - // A tree with a single node has `height == 1`. - let height = num_leaves.trailing_zeros() as usize + 1; - - assert!(height >= 2, "The tree should have two or more heights"); - - // A buffer/scratch-space used for storing each round of hashes at each height. - // - // This buffer is kept as small as possible; it will shrink so it never stores a padding node. - let mut chunks = ChunkStore::with_capacity(initial_parents_with_values); - - // Create a parent in the `chunks` buffer for every two chunks in `bytes`. - // - // I.e., do the first round of hashing, hashing from the `bytes` slice and filling the `chunks` - // struct.
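To make the size arithmetic above concrete, here is a standalone worked example for five chunks of input padded out to `min_leaves = 8` (the helper mirrors the removed `next_even_number`; no new behaviour is introduced):

```rust
// Worked example of the leaf/parent/height arithmetic used by merkleize_padded.
fn next_even_number(n: usize) -> usize {
    n + n % 2
}

fn main() {
    const BYTES_PER_CHUNK: usize = 32;
    let bytes_len = 5 * BYTES_PER_CHUNK; // five non-padding leaves
    let min_leaves = 8;

    let leaves_with_values = (bytes_len + (BYTES_PER_CHUNK - 1)) / BYTES_PER_CHUNK;
    let initial_parents = std::cmp::max(1, next_even_number(leaves_with_values) / 2);
    let num_leaves = std::cmp::max(leaves_with_values, min_leaves).next_power_of_two();
    let height = num_leaves.trailing_zeros() as usize + 1;

    assert_eq!(leaves_with_values, 5);
    assert_eq!(initial_parents, 3); // only parents with a non-padding child are stored
    assert_eq!(num_leaves, 8);
    assert_eq!(height, 4);
}
```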
- for i in 0..initial_parents_with_values { - let start = i * BYTES_PER_CHUNK * 2; - - // Hash two chunks, creating a parent chunk. - let hash = match bytes.get(start..start + BYTES_PER_CHUNK * 2) { - // All bytes are available, hash as usual. - Some(slice) => hash_fixed(slice), - // Unable to get all the bytes, get a small slice and pad it out. - None => { - let mut preimage = bytes - .get(start..) - .expect("`i` can only be larger than zero if there are bytes to read") - .to_vec(); - preimage.resize(BYTES_PER_CHUNK * 2, 0); - hash_fixed(&preimage) - } - }; - - assert_eq!( - hash.len(), - BYTES_PER_CHUNK, - "Hashes should be exactly one chunk" - ); - - // Store the parent node. - chunks - .set(i, &hash) - .expect("Buffer should always have capacity for parent nodes") - } - - // Iterate through all heights above the leaf nodes and either (a) hash two children or, (b) - // hash a left child and a right padding node. - // - // Skip the 0'th height because the leaves have already been processed. Skip the highest-height - // in the tree as it is the root does not require hashing. - // - // The padding nodes for each height are cached via `lazy static` to simulate non-adjacent - // padding nodes (i.e., avoid doing unnecessary hashing). - for height in 1..height - 1 { - let child_nodes = chunks.len(); - let parent_nodes = next_even_number(child_nodes) / 2; - - // For each pair of nodes stored in `chunks`: - // - // - If two nodes are available, hash them to form a parent. - // - If one node is available, hash it and a cached padding node to form a parent. - for i in 0..parent_nodes { - let (left, right) = match (chunks.get(i * 2), chunks.get(i * 2 + 1)) { - (Ok(left), Ok(right)) => (left, right), - (Ok(left), Err(_)) => (left, get_zero_hash(height)), - // Deriving `parent_nodes` from `chunks.len()` has ensured that we never encounter the - // scenario where we expect two nodes but there are none. - (Err(_), Err(_)) => unreachable!("Parent must have one child"), - // `chunks` is a contiguous array so it is impossible for an index to be missing - // when a higher index is present. - (Err(_), Ok(_)) => unreachable!("Parent must have a left child"), - }; - - assert!( - left.len() == right.len() && right.len() == BYTES_PER_CHUNK, - "Both children should be `BYTES_PER_CHUNK` bytes." - ); - - let hash = hash32_concat(left, right); - - // Store a parent node. - chunks - .set(i, &hash) - .expect("Buf is adequate size for parent"); - } - - // Shrink the buffer so it neatly fits the number of new nodes created in this round. - // - // The number of `parent_nodes` is either decreasing or stable. It never increases. - chunks.truncate(parent_nodes); - } - - // There should be a single chunk left in the buffer and it is the Merkle root. - let root = chunks.into_vec(); - - assert_eq!(root.len(), BYTES_PER_CHUNK, "Only one chunk should remain"); - - Hash256::from_slice(&root) -} - -/// A helper struct for storing words of `BYTES_PER_CHUNK` size in a flat byte array. -#[derive(Debug)] -struct ChunkStore(Vec); - -impl ChunkStore { - /// Creates a new instance with `chunks` padding nodes. - fn with_capacity(chunks: usize) -> Self { - Self(vec![0; chunks * BYTES_PER_CHUNK]) - } - - /// Set the `i`th chunk to `value`. - /// - /// Returns `Err` if `value.len() != BYTES_PER_CHUNK` or `i` is out-of-bounds. 
- fn set(&mut self, i: usize, value: &[u8]) -> Result<(), ()> { - if i < self.len() && value.len() == BYTES_PER_CHUNK { - let slice = &mut self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]; - slice.copy_from_slice(value); - Ok(()) - } else { - Err(()) - } - } - - /// Gets the `i`th chunk. - /// - /// Returns `Err` if `i` is out-of-bounds. - fn get(&self, i: usize) -> Result<&[u8], ()> { - if i < self.len() { - Ok(&self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]) - } else { - Err(()) - } - } - - /// Returns the number of chunks presently stored in `self`. - fn len(&self) -> usize { - self.0.len() / BYTES_PER_CHUNK - } - - /// Truncates 'self' to `num_chunks` chunks. - /// - /// Functionally identical to `Vec::truncate`. - fn truncate(&mut self, num_chunks: usize) { - self.0.truncate(num_chunks * BYTES_PER_CHUNK) - } - - /// Consumes `self`, returning the underlying byte array. - fn into_vec(self) -> Vec { - self.0 - } -} - -/// Returns the next even number following `n`. If `n` is even, `n` is returned. -fn next_even_number(n: usize) -> usize { - n + n % 2 -} - -#[cfg(test)] -mod test { - use super::*; - use crate::ZERO_HASHES_MAX_INDEX; - - pub fn reference_root(bytes: &[u8]) -> Hash256 { - crate::merkleize_standard(bytes) - } - - macro_rules! common_tests { - ($get_bytes: ident) => { - #[test] - fn zero_value_0_nodes() { - test_against_reference(&$get_bytes(0 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_1_nodes() { - test_against_reference(&$get_bytes(1 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_2_nodes() { - test_against_reference(&$get_bytes(2 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_3_nodes() { - test_against_reference(&$get_bytes(3 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_4_nodes() { - test_against_reference(&$get_bytes(4 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes() { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_9_nodes() { - test_against_reference(&$get_bytes(9 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes_varying_min_length() { - for i in 0..64 { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), i); - } - } - - #[test] - fn zero_value_range_of_nodes() { - for i in 0..32 * BYTES_PER_CHUNK { - test_against_reference(&$get_bytes(i), 0); - } - } - - #[test] - fn max_tree_depth_min_nodes() { - let input = vec![0; 10 * BYTES_PER_CHUNK]; - let min_nodes = 2usize.pow(ZERO_HASHES_MAX_INDEX as u32); - assert_eq!( - merkleize_padded(&input, min_nodes).as_bytes(), - get_zero_hash(ZERO_HASHES_MAX_INDEX) - ); - } - }; - } - - mod zero_value { - use super::*; - - fn zero_bytes(bytes: usize) -> Vec { - vec![0; bytes] - } - - common_tests!(zero_bytes); - } - - mod random_value { - use super::*; - use rand::RngCore; - - fn random_bytes(bytes: usize) -> Vec { - let mut bytes = Vec::with_capacity(bytes); - rand::thread_rng().fill_bytes(&mut bytes); - bytes - } - - common_tests!(random_bytes); - } - - fn test_against_reference(input: &[u8], min_nodes: usize) { - let mut reference_input = input.to_vec(); - reference_input.resize( - std::cmp::max( - reference_input.len(), - min_nodes.next_power_of_two() * BYTES_PER_CHUNK, - ), - 0, - ); - - assert_eq!( - reference_root(&reference_input), - merkleize_padded(input, min_nodes), - "input.len(): {:?}", - input.len() - ); - } -} diff --git a/consensus/tree_hash/src/merkleize_standard.rs b/consensus/tree_hash/src/merkleize_standard.rs deleted file mode 100644 index 
6dd046991ed..00000000000 --- a/consensus/tree_hash/src/merkleize_standard.rs +++ /dev/null @@ -1,81 +0,0 @@ -use super::*; -use eth2_hashing::hash; - -/// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid -/// processing or storing padding bytes. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// The input `bytes` will be padded to ensure that the number of leaves is a power-of-two. -/// -/// ## CPU Performance -/// -/// Will hash all nodes in the tree, even if they are padding and pre-determined. -/// -/// ## Memory Performance -/// -/// - Duplicates the input `bytes`. -/// - Stores all internal nodes, even if they are padding. -/// - Does not free up unused memory during operation. -pub fn merkleize_standard(bytes: &[u8]) -> Hash256 { - // If the bytes are just one chunk (or less than one chunk) just return them. - if bytes.len() <= HASHSIZE { - let mut o = bytes.to_vec(); - o.resize(HASHSIZE, 0); - return Hash256::from_slice(&o[0..HASHSIZE]); - } - - let leaves = num_sanitized_leaves(bytes.len()); - let nodes = num_nodes(leaves); - let internal_nodes = nodes - leaves; - - let num_bytes = std::cmp::max(internal_nodes, 1) * HASHSIZE + bytes.len(); - - let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; - - o.append(&mut bytes.to_vec()); - - assert_eq!(o.len(), num_bytes); - - let empty_chunk_hash = hash(&[0; MERKLE_HASH_CHUNK]); - - let mut i = nodes * HASHSIZE; - let mut j = internal_nodes * HASHSIZE; - - while i >= MERKLE_HASH_CHUNK { - i -= MERKLE_HASH_CHUNK; - - j -= HASHSIZE; - let hash = match o.get(i..i + MERKLE_HASH_CHUNK) { - // All bytes are available, hash as usual. - Some(slice) => hash(slice), - // Unable to get all the bytes. - None => { - match o.get(i..) { - // Able to get some of the bytes, pad them out. - Some(slice) => { - let mut bytes = slice.to_vec(); - bytes.resize(MERKLE_HASH_CHUNK, 0); - hash(&bytes) - } - // Unable to get any bytes, use the empty-chunk hash. 
- None => empty_chunk_hash.clone(), - } - } - }; - - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - Hash256::from_slice(&o[0..HASHSIZE]) -} - -fn num_sanitized_leaves(num_bytes: usize) -> usize { - let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; - leaves.next_power_of_two() -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} diff --git a/consensus/tree_hash/tests/tests.rs b/consensus/tree_hash/tests/tests.rs deleted file mode 100644 index 8b2a4b21be8..00000000000 --- a/consensus/tree_hash/tests/tests.rs +++ /dev/null @@ -1,128 +0,0 @@ -use ssz_derive::Encode; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, BYTES_PER_CHUNK}; -use tree_hash_derive::TreeHash; - -#[derive(Encode)] -struct HashVec { - vec: Vec<u8>, -} - -impl From<Vec<u8>> for HashVec { - fn from(vec: Vec<u8>) -> Self { - Self { vec } - } } - -impl tree_hash::TreeHash for HashVec { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut hasher = - MerkleHasher::with_leaves((self.vec.len() + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK); - - for item in &self.vec { - hasher.write(&item.tree_hash_packed_encoding()).unwrap() - } - - let root = hasher.finish().unwrap(); - - tree_hash::mix_in_length(&root, self.vec.len()) - } -} - -fn mix_in_selector(a: Hash256, selector: u8) -> Hash256 { - let mut b = [0; 32]; - b[0] = selector; - - Hash256::from_slice(&eth2_hashing::hash32_concat(a.as_bytes(), &b)) -} - -fn u8_hash_concat(v1: u8, v2: u8) -> Hash256 { - let mut a = [0; 32]; - let mut b = [0; 32]; - - a[0] = v1; - b[0] = v2; - - Hash256::from_slice(&eth2_hashing::hash32_concat(&a, &b)) -} - -fn u8_hash(x: u8) -> Hash256 { - let mut a = [0; 32]; - a[0] = x; - Hash256::from_slice(&a) -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum FixedTrans { - A(u8), - B(u8), -} - -#[test] -fn fixed_trans() { - assert_eq!(FixedTrans::A(2).tree_hash_root(), u8_hash(2)); - assert_eq!(FixedTrans::B(2).tree_hash_root(), u8_hash(2)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum FixedUnion { - A(u8), - B(u8), -} - -#[test] -fn fixed_union() { - assert_eq!(FixedUnion::A(2).tree_hash_root(), u8_hash_concat(2, 0)); - assert_eq!(FixedUnion::B(2).tree_hash_root(), u8_hash_concat(2, 1)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum VariableTrans { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_trans() { - assert_eq!( - VariableTrans::A(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); - assert_eq!( - VariableTrans::B(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum VariableUnion { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_union() { - assert_eq!( - VariableUnion::A(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 0) - ); - assert_eq!( - VariableUnion::B(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 1) - ); -} diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml deleted file mode 100644 index 5f3396eb163..00000000000 --- a/consensus/tree_hash_derive/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "tree_hash_derive"
-version = "0.4.0" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the tree_hash crate." -license = "Apache-2.0" - -[lib] -proc-macro = true - -[dependencies] -syn = "1.0.42" -quote = "1.0.7" -darling = "0.13.0" diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs deleted file mode 100644 index 85ece80fb56..00000000000 --- a/consensus/tree_hash_derive/src/lib.rs +++ /dev/null @@ -1,336 +0,0 @@ -use darling::FromDeriveInput; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, Attribute, DataEnum, DataStruct, DeriveInput, Meta}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(tree_hash))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, -} - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[tree_hash(enum_behaviour = \"transparent\")]"; - -enum EnumBehaviour { - Transparent, - Union, -} - -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) - } -} - -/// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. -fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { - get_hashable_fields_and_their_caches(struct_data) - .into_iter() - .map(|(ident, _, _)| ident) - .collect() -} - -/// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field. -fn get_hashable_fields_and_their_caches( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Ident, syn::Type, Option)> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_hashing(f) { - None - } else { - let ident = f - .ident - .as_ref() - .expect("tree_hash_derive only supports named struct fields"); - let opt_cache_field = get_cache_field_for(f); - Some((ident, f.ty.clone(), opt_cache_field)) - } - }) - .collect() -} - -/// Parse the cached_tree_hash attribute for a field. -/// -/// Extract the cache field name from `#[cached_tree_hash(cache_field_name)]` -/// -/// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, -/// or `None` otherwise. -fn get_cache_field_for(field: &syn::Field) -> Option { - use syn::{MetaList, NestedMeta}; - - let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); - if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { - nested.iter().find_map(|x| match x { - NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(), - _ => None, - }) - } else { - None - } -} - -/// Process the `cached_tree_hash` attributes from a list of attributes into structured `Meta`s. 
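For reference, a hypothetical struct (field names invented) showing the two field attributes this machinery parses; `skip_hashing` excludes a field from the container's root computation, assuming the now-externalised `tree_hash_derive` crate:

```rust
use tree_hash_derive::TreeHash;

// Sketch only: `scratch` carries no consensus data, so it is excluded from
// the tree hash root via the `skip_hashing` attribute handled above.
#[derive(TreeHash)]
struct Example {
    slot: u64,
    index: u64,
    #[tree_hash(skip_hashing)]
    scratch: u64,
}
```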
-fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { - attrs - .iter() - .filter(|attr| attr.path.is_ident("cached_tree_hash")) - .flat_map(|attr| attr.parse_meta()) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be hashed. -/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_hashing(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("tree_hash") - && attr.tokens.to_string().replace(' ', "") == "(skip_hashing)" - }) -} - -/// Implements `tree_hash::TreeHash` for some `struct`. -/// -/// Fields are hashed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); - - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - tree_hash_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => tree_hash_derive_enum_transparent(&item, s), - EnumBehaviour::Union => tree_hash_derive_enum_union(&item, s), - }, - _ => panic!("tree_hash_derive only supports structs and enums."), - } -} - -fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let idents = get_hashable_fields(struct_data); - let num_leaves = idents.len(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - let mut hasher = tree_hash::MerkleHasher::with_leaves(#num_leaves); - - #( - hasher.write(self.#idents.tree_hash_root().as_bytes()) - .expect("tree hash derive should not apply too many leaves"); - )* - - hasher.finish().expect("tree hash derive should not have a remaining buffer") - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be hashed as if -/// the enum does not exist. -/// -///## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are "container" types. -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the container type requirement isn't met. 
-fn tree_hash_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, type_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_expr = quote! { - <#ty as tree_hash::TreeHash>::tree_hash_type() - }; - (pattern, type_expr) - }) - .unzip(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - #( - assert_eq!( - #type_exprs, - tree_hash::TreeHashType::Container, - "all variants must be of container type" - ); - )* - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => inner.tree_hash_root(), - )* - } - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn tree_hash_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - quote! { - #name::#variant_name(ref inner) - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! 
-/// Derive `TreeHash` for an `enum` following the "union" SSZ spec.
-///
-/// The union selector will be determined based upon the order in which the enum variants are
-/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant
-/// beneath it will have a selector of `1` and so on.
-///
-/// # Limitations
-///
-/// Only supports enums where each variant has a single field.
-fn tree_hash_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream {
-    let name = &derive_input.ident;
-    let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl();
-
-    let patterns: Vec<_> = enum_data
-        .variants
-        .iter()
-        .map(|variant| {
-            let variant_name = &variant.ident;
-
-            if variant.fields.len() != 1 {
-                panic!("TreeHash can only be derived for enums with 1 field per variant");
-            }
-
-            quote! {
-                #name::#variant_name(ref inner)
-            }
-        })
-        .collect();
-
-    let union_selectors = compute_union_selectors(patterns.len());
-
-    let output = quote! {
-        impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause {
-            fn tree_hash_type() -> tree_hash::TreeHashType {
-                tree_hash::TreeHashType::Container
-            }
-
-            fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding {
-                unreachable!("Enum should never be packed")
-            }
-
-            fn tree_hash_packing_factor() -> usize {
-                unreachable!("Enum should never be packed")
-            }
-
-            fn tree_hash_root(&self) -> tree_hash::Hash256 {
-                match self {
-                    #(
-                        #patterns => {
-                            let root = inner.tree_hash_root();
-                            let selector = #union_selectors;
-                            tree_hash::mix_in_selector(&root, selector)
-                                .expect("derive macro should prevent out-of-bounds selectors")
-                        },
-                    )*
-                }
-            }
-        }
-    };
-    output.into()
-}
-
-fn compute_union_selectors(num_variants: usize) -> Vec<u8> {
-    let union_selectors = (0..num_variants)
-        .map(|i| {
-            i.try_into()
-                .expect("union selector exceeds u8::max_value, union has too many variants")
-        })
-        .collect::<Vec<u8>>();
-
-    let highest_selector = union_selectors
-        .last()
-        .copied()
-        .expect("0-variant union is not permitted");
-
-    assert!(
-        highest_selector <= MAX_UNION_SELECTOR,
-        "union selector {} exceeds limit of {}, enum has too many variants",
-        highest_selector,
-        MAX_UNION_SELECTOR
-    );
-
-    union_selectors
-}
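The union rule in one runnable sketch (same assumptions as the examples above): the selector follows declaration order, and the variant's root is the inner root mixed with that selector.

```rust
use tree_hash::{mix_in_selector, TreeHash};
use tree_hash_derive::TreeHash;

#[derive(TreeHash)]
struct V0 {
    a: u64,
}
#[derive(TreeHash)]
struct V1 {
    b: u64,
}

#[derive(TreeHash)]
#[tree_hash(enum_behaviour = "union")]
enum U {
    First(V0),  // selector 0
    Second(V1), // selector 1
}

fn main() {
    let u = U::Second(V1 { b: 7 });
    // Mirrors the generated `tree_hash_root` arm shown above.
    let expected = mix_in_selector(&V1 { b: 7 }.tree_hash_root(), 1)
        .expect("selector 1 is within bounds");
    assert_eq!(u.tree_hash_root(), expected);
}
```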
- #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. pub aggregate: Attestation, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index c6a661c85dd..286502b4497 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 87a9c932a45..93a4c147b67 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,6 +12,6 @@ pub struct AttestationDuty { /// The total number of attesters in the committee. pub committee_len: usize, /// The committee count at `attestation_slot`. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 4bf9e641c03..1b40fe76d4d 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -58,7 +58,7 @@ pub struct BeaconBlock = FullPayload #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, #[superstruct(getter(copy))] pub parent_root: Hash256, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index c6d6678f31a..f2ef0a3dccd 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct BeaconBlockHeader { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 921dafbbc6d..4a9da364047 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -5,7 +5,7 @@ use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; use derivative::Derivative; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; @@ -210,7 +210,7 @@ where { // Versioning #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, #[superstruct(getter(copy))] pub genesis_validators_root: Hash256, @@ -232,7 +232,7 @@ where pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -296,10 +296,10 @@ where // Capella #[superstruct(only(Capella), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_index: u64, #[superstruct(only(Capella), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = 
"serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella))] diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index efc6573d2bc..d1d63e3c806 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -403,7 +403,7 @@ impl ValidatorsListTreeHashCache { validators.len(), ), list_arena, - values: ParallelValidatorTreeHash::new::(validators), + values: ParallelValidatorTreeHash::new(validators), } } @@ -468,7 +468,7 @@ impl ParallelValidatorTreeHash { /// /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any /// hashing. - fn new(validators: &[Validator]) -> Self { + fn new(validators: &[Validator]) -> Self { let num_arenas = std::cmp::max( 1, (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index b279515bd1f..3ed9ee9255e 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct BlsToExecutionChange { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub from_bls_pubkey: PublicKeyBytes, pub to_execution_address: Address, diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index e922e81c706..8723c2afed9 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -16,7 +16,7 @@ use tree_hash_derive::TreeHash; pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] @@ -50,7 +50,7 @@ impl> ForkVersionDeserialize #[derive(Deserialize)] struct Helper { header: serde_json::Value, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] value: Uint256, pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index bbb0b9712b2..163b07dcd15 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,9 +1,9 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; -use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; use serde::{Deserializer, Serialize, Serializer}; use serde_derive::Deserialize; +use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; @@ -615,7 +615,7 @@ impl ChainSpec { * Capella hard fork params */ capella_fork_version: [0x03, 00, 00, 00], - capella_fork_epoch: None, + capella_fork_epoch: Some(Epoch::new(194048)), max_validators_per_withdrawals_sweep: 16384, /* @@ -895,33 +895,33 @@ pub struct Config { pub preset_base: String, #[serde(default = "default_terminal_total_difficulty")] - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub 
@@ -895,33 +895,33 @@ pub struct Config {
     pub preset_base: String,
 
     #[serde(default = "default_terminal_total_difficulty")]
-    #[serde(with = "eth2_serde_utils::quoted_u256")]
+    #[serde(with = "serde_utils::quoted_u256")]
     pub terminal_total_difficulty: Uint256,
     #[serde(default = "default_terminal_block_hash")]
     pub terminal_block_hash: ExecutionBlockHash,
     #[serde(default = "default_terminal_block_hash_activation_epoch")]
     pub terminal_block_hash_activation_epoch: Epoch,
     #[serde(default = "default_safe_slots_to_import_optimistically")]
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub safe_slots_to_import_optimistically: u64,
 
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     min_genesis_active_validator_count: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     min_genesis_time: u64,
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     genesis_fork_version: [u8; 4],
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     genesis_delay: u64,
 
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     altair_fork_version: [u8; 4],
     #[serde(serialize_with = "serialize_fork_epoch")]
     #[serde(deserialize_with = "deserialize_fork_epoch")]
     pub altair_fork_epoch: Option<MaybeQuoted<Epoch>>,
 
     #[serde(default = "default_bellatrix_fork_version")]
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     bellatrix_fork_version: [u8; 4],
     #[serde(default)]
     #[serde(serialize_with = "serialize_fork_epoch")]
@@ -929,41 +929,41 @@ pub struct Config {
     pub bellatrix_fork_epoch: Option<MaybeQuoted<Epoch>>,
 
     #[serde(default = "default_capella_fork_version")]
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     capella_fork_version: [u8; 4],
     #[serde(default)]
     #[serde(serialize_with = "serialize_fork_epoch")]
     #[serde(deserialize_with = "deserialize_fork_epoch")]
     pub capella_fork_epoch: Option<MaybeQuoted<Epoch>>,
 
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     seconds_per_slot: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     seconds_per_eth1_block: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     min_validator_withdrawability_delay: Epoch,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     shard_committee_period: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     eth1_follow_distance: u64,
 
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     inactivity_score_bias: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     inactivity_score_recovery_rate: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     ejection_balance: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     min_per_epoch_churn_limit: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     churn_limit_quotient: u64,
 
     #[serde(skip_serializing_if = "Option::is_none")]
     proposer_score_boost: Option<MaybeQuoted<u64>>,
 
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     deposit_chain_id: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     deposit_network_id: u64,
     deposit_contract_address: Address,
 }
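The one behavioural change in `chain_spec.rs` is `capella_fork_epoch` being set for mainnet. As a sanity check on the number, epoch 194048 maps to a wall-clock time as follows (assuming mainnet's published genesis time of 1606824023 and 12-second slots):

```rust
fn main() {
    let genesis_time: u64 = 1_606_824_023; // mainnet genesis
    let capella_epoch: u64 = 194_048;
    let slots_per_epoch: u64 = 32;
    let seconds_per_slot: u64 = 12;

    let fork_time = genesis_time + capella_epoch * slots_per_epoch * seconds_per_slot;
    // 2023-04-12 22:27:35 UTC, the scheduled Capella activation.
    assert_eq!(fork_time, 1_681_338_455);
}
```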
diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs
index 167b0857c5a..7e757f89b1a 100644
--- a/consensus/types/src/contribution_and_proof.rs
+++ b/consensus/types/src/contribution_and_proof.rs
@@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash;
 #[arbitrary(bound = "T: EthSpec")]
 pub struct ContributionAndProof<T: EthSpec> {
     /// The index of the validator that created the sync contribution.
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub aggregator_index: u64,
     /// The aggregate contribution.
     pub contribution: SyncCommitteeContribution<T>,
diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs
index 1969311671f..d75643f6597 100644
--- a/consensus/types/src/deposit_data.rs
+++ b/consensus/types/src/deposit_data.rs
@@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash;
 pub struct DepositData {
     pub pubkey: PublicKeyBytes,
     pub withdrawal_credentials: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub amount: u64,
     pub signature: SignatureBytes,
 }
diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs
index 63073401c22..1096cfaa283 100644
--- a/consensus/types/src/deposit_message.rs
+++ b/consensus/types/src/deposit_message.rs
@@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash;
 pub struct DepositMessage {
     pub pubkey: PublicKeyBytes,
     pub withdrawal_credentials: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub amount: u64,
 }
diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs
index 21bbab81fff..aea4677f265 100644
--- a/consensus/types/src/deposit_tree_snapshot.rs
+++ b/consensus/types/src/deposit_tree_snapshot.rs
@@ -1,5 +1,5 @@
 use crate::*;
-use eth2_hashing::{hash32_concat, ZERO_HASHES};
+use ethereum_hashing::{hash32_concat, ZERO_HASHES};
 use int_to_bytes::int_to_bytes32;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs
index 3556e31a9fc..409383c9048 100644
--- a/consensus/types/src/enr_fork_id.rs
+++ b/consensus/types/src/enr_fork_id.rs
@@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash;
     TestRandom,
 )]
 pub struct EnrForkId {
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     pub fork_digest: [u8; 4],
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     pub next_fork_version: [u8; 4],
     pub next_fork_epoch: Epoch,
 }
diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs
index 6b2396e112c..d8f476b99b5 100644
--- a/consensus/types/src/eth1_data.rs
+++ b/consensus/types/src/eth1_data.rs
@@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash;
 )]
 pub struct Eth1Data {
     pub deposit_root: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub deposit_count: u64,
     pub block_hash: Hash256,
 }
#[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] @@ -152,3 +152,12 @@ impl ForkVersionDeserialize for ExecutionPayload { }) } } + +impl ExecutionPayload { + pub fn fork_name(&self) -> ForkName { + match self { + ExecutionPayload::Merge(_) => ForkName::Merge, + ExecutionPayload::Capella(_) => ForkName::Capella, + } + } +} diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index d193a6cd8e7..1fb29db9d3a 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -53,21 +53,21 @@ pub struct ExecutionPayloadHeader { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index de332f0cada..4650881f72d 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index cc790393159..bf9c48cd7eb 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct ForkData { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 07ff40b27ef..2d97dc12194 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -6,14 +6,15 @@ use std::sync::Arc; // Deserialize is only implemented for types that implement ForkVersionDeserialize #[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticForkVersionedResponse { +pub struct ExecutionOptimisticFinalizedForkVersionedResponse { 
diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs
index d193a6cd8e7..1fb29db9d3a 100644
--- a/consensus/types/src/execution_payload_header.rs
+++ b/consensus/types/src/execution_payload_header.rs
@@ -53,21 +53,21 @@ pub struct ExecutionPayloadHeader<T: EthSpec> {
     pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
     #[superstruct(getter(copy))]
     pub prev_randao: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     #[superstruct(getter(copy))]
     pub block_number: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     #[superstruct(getter(copy))]
     pub gas_limit: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     #[superstruct(getter(copy))]
     pub gas_used: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     #[superstruct(getter(copy))]
     pub timestamp: u64,
     #[serde(with = "ssz_types::serde_utils::hex_var_list")]
     pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
-    #[serde(with = "eth2_serde_utils::quoted_u256")]
+    #[serde(with = "serde_utils::quoted_u256")]
     #[superstruct(getter(copy))]
     pub base_fee_per_gas: Uint256,
     #[superstruct(getter(copy))]
diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs
index de332f0cada..4650881f72d 100644
--- a/consensus/types/src/fork.rs
+++ b/consensus/types/src/fork.rs
@@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash;
     TestRandom,
 )]
 pub struct Fork {
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     pub previous_version: [u8; 4],
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     pub current_version: [u8; 4],
     pub epoch: Epoch,
 }
diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs
index cc790393159..bf9c48cd7eb 100644
--- a/consensus/types/src/fork_data.rs
+++ b/consensus/types/src/fork_data.rs
@@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash;
     TestRandom,
 )]
 pub struct ForkData {
-    #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+    #[serde(with = "serde_utils::bytes_4_hex")]
     pub current_version: [u8; 4],
     pub genesis_validators_root: Hash256,
 }
diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs
index 07ff40b27ef..2d97dc12194 100644
--- a/consensus/types/src/fork_versioned_response.rs
+++ b/consensus/types/src/fork_versioned_response.rs
@@ -6,14 +6,15 @@ use std::sync::Arc;
 
 // Deserialize is only implemented for types that implement ForkVersionDeserialize
 #[derive(Debug, PartialEq, Clone, Serialize)]
-pub struct ExecutionOptimisticForkVersionedResponse<T> {
+pub struct ExecutionOptimisticFinalizedForkVersionedResponse<T> {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub version: Option<ForkName>,
     pub execution_optimistic: Option<bool>,
+    pub finalized: Option<bool>,
     pub data: T,
 }
 
-impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse<F>
+impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse<F>
 where
     F: ForkVersionDeserialize,
 {
@@ -25,6 +26,7 @@ where
         struct Helper {
            version: Option<ForkName>,
            execution_optimistic: Option<bool>,
+           finalized: Option<bool>,
            data: serde_json::Value,
         }
 
@@ -34,9 +36,10 @@ where
             None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
         };
 
-        Ok(ExecutionOptimisticForkVersionedResponse {
+        Ok(ExecutionOptimisticFinalizedForkVersionedResponse {
             version: helper.version,
             execution_optimistic: helper.execution_optimistic,
+            finalized: helper.finalized,
             data,
         })
     }
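A sketch of the JSON shape the renamed wrapper now parses; the field values are illustrative:

```rust
use serde_json::json;

fn main() {
    // `finalized` now travels alongside `execution_optimistic`; both remain
    // optional, so responses from older servers still deserialize.
    let response = json!({
        "version": "capella",
        "execution_optimistic": false,
        "finalized": true,
        "data": { "slot": "100" }
    });
    assert_eq!(response["finalized"], json!(true));
}
```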
diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs
index 6288cdbe807..bd4abe37d8f 100644
--- a/consensus/types/src/graffiti.rs
+++ b/consensus/types/src/graffiti.rs
@@ -27,7 +27,7 @@ impl Graffiti {
 
 impl fmt::Display for Graffiti {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
+        write!(f, "{}", serde_utils::hex::encode(self.0))
     }
 }
 
@@ -96,7 +96,7 @@ pub mod serde_graffiti {
     where
         S: Serializer,
     {
-        serializer.serialize_str(&eth2_serde_utils::hex::encode(bytes))
+        serializer.serialize_str(&serde_utils::hex::encode(bytes))
     }
 
     pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error>
@@ -105,7 +105,7 @@ pub mod serde_graffiti {
     {
         let s: String = Deserialize::deserialize(deserializer)?;
 
-        let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?;
+        let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?;
 
         if bytes.len() != GRAFFITI_BYTES_LEN {
             return Err(D::Error::custom(format!(
diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs
index 16ffb1ad8fa..c59cbef307e 100644
--- a/consensus/types/src/indexed_attestation.rs
+++ b/consensus/types/src/indexed_attestation.rs
@@ -72,9 +72,9 @@ impl<T: EthSpec> Hash for IndexedAttestation<T> {
 mod quoted_variable_list_u64 {
     use super::*;
     use crate::Unsigned;
-    use eth2_serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper};
     use serde::ser::SerializeSeq;
     use serde::{Deserializer, Serializer};
+    use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper};
 
     pub fn serialize<S, T>(value: &VariableList<u64, T>, serializer: S) -> Result<S::Ok, S::Error>
     where
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 82407424411..aefb45490a8 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -143,9 +143,7 @@ pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
-pub use crate::fork_versioned_response::{
-    ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse,
-};
+pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse};
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
 pub use crate::indexed_attestation::IndexedAttestation;
diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs
index bd98f8da078..4f170a60be8 100644
--- a/consensus/types/src/participation_flags.rs
+++ b/consensus/types/src/participation_flags.rs
@@ -9,7 +9,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType};
 #[serde(transparent)]
 #[derive(arbitrary::Arbitrary)]
 pub struct ParticipationFlags {
-    #[serde(with = "eth2_serde_utils::quoted_u8")]
+    #[serde(with = "serde_utils::quoted_u8")]
     bits: u8,
 }
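The graffiti codec above relies on the crate's `hex` helpers, which use 0x-prefixed strings; a minimal sketch assuming `ethereum_serde_utils` 0.5 keeps that behaviour:

```rust
fn main() {
    let bytes = [0xde, 0xad, 0xbe, 0xef];
    let s = serde_utils::hex::encode(bytes);
    assert_eq!(s, "0xdeadbeef");
    // `decode` expects the 0x prefix and round-trips the bytes.
    let back = serde_utils::hex::decode(&s).unwrap();
    assert_eq!(back, bytes.to_vec());
}
```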
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_eth1_voting_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_historical_root: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_epochs_to_inactivity_penalty: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_historical_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_slashings_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub historical_roots_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_registry_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub base_reward_factor: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_proposer_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attestations: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_deposits: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_voluntary_exits: u64, } @@ -123,17 +123,17 @@ impl BasePreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct AltairPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_sync_committee_period: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_sync_committee_participants: u64, } @@ -153,19 +153,19 @@ impl AltairPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BellatrixPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = 
"serde_utils::quoted_u64")] pub inactivity_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bytes_per_transaction: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_transactions_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub bytes_per_logs_bloom: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_extra_data_bytes: u64, } @@ -187,11 +187,11 @@ impl BellatrixPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct CapellaPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bls_to_execution_changes: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawals_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_withdrawals_sweep: u64, } diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/proposer_preparation_data.rs index 6179828a950..2828b0d4d55 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/proposer_preparation_data.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProposerPreparationData { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The fee-recipient address. 
     pub fee_recipient: Address,
diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs
index f8bc8ba69fb..2a404b3b963 100644
--- a/consensus/types/src/selection_proof.rs
+++ b/consensus/types/src/selection_proof.rs
@@ -1,7 +1,7 @@
 use crate::{
     ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot,
 };
-use eth2_hashing::hash;
+use ethereum_hashing::hash;
 use safe_arith::{ArithError, SafeArith};
 use ssz::Encode;
 use std::cmp;
diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs
index 2716367c7eb..e9f1e192b47 100644
--- a/consensus/types/src/slot_epoch.rs
+++ b/consensus/types/src/slot_epoch.rs
@@ -38,7 +38,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi
     Deserialize,
 )]
 #[serde(transparent)]
-pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64);
+pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64);
 
 #[derive(
     arbitrary::Arbitrary,
@@ -54,7 +54,7 @@ pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64);
     Deserialize,
 )]
 #[serde(transparent)]
-pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64);
+pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64);
 
 impl_common!(Slot);
 impl_common!(Epoch);
diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs
index fd06eb78a12..b885f89f7d4 100644
--- a/consensus/types/src/subnet_id.rs
+++ b/consensus/types/src/subnet_id.rs
@@ -20,7 +20,7 @@ lazy_static! {
 
 #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 #[serde(transparent)]
-pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64);
+pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);
 
 pub fn subnet_id_to_string(i: u64) -> &'static str {
     if i < MAX_SUBNET_ID as u64 {
@@ -85,7 +85,7 @@ impl SubnetId {
         let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription;
         let permutation_seed =
-            eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx));
+            ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx));
 
         let num_subnets = 1 << spec.attestation_subnet_prefix_bits();
diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs
index 9e72438be20..b101068123b 100644
--- a/consensus/types/src/sync_aggregator_selection_data.rs
+++ b/consensus/types/src/sync_aggregator_selection_data.rs
@@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash;
 )]
 pub struct SyncAggregatorSelectionData {
     pub slot: Slot,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub subcommittee_index: u64,
 }
diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs
index ef8b52becfc..425f8f116d4 100644
--- a/consensus/types/src/sync_committee_contribution.rs
+++ b/consensus/types/src/sync_committee_contribution.rs
@@ -32,7 +32,7 @@ pub enum Error {
 pub struct SyncCommitteeContribution<T: EthSpec> {
     pub slot: Slot,
     pub beacon_block_root: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub subcommittee_index: u64,
     pub aggregation_bits: BitVector<T::SyncSubcommitteeSize>,
     pub signature: AggregateSignature,
diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs
index 5c2fb083743..d0301cdf638 100644
--- a/consensus/types/src/sync_committee_message.rs
+++ b/consensus/types/src/sync_committee_message.rs
@@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash;
 pub struct SyncCommitteeMessage {
     pub slot: Slot,
     pub beacon_block_root: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub validator_index: u64,
     // Signature by the validator over `beacon_block_root`.
     pub signature: Signature,
diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs
index 7f5ed063f62..8e040279d73 100644
--- a/consensus/types/src/sync_committee_subscription.rs
+++ b/consensus/types/src/sync_committee_subscription.rs
@@ -7,10 +7,10 @@ use ssz_derive::{Decode, Encode};
 #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
 pub struct SyncCommitteeSubscription {
     /// The validators index.
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub validator_index: u64,
     /// The sync committee indices.
-    #[serde(with = "eth2_serde_utils::quoted_u64_vec")]
+    #[serde(with = "serde_utils::quoted_u64_vec")]
     pub sync_committee_indices: Vec<u64>,
     /// Epoch until which this subscription is required.
     pub until_epoch: Epoch,
diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs
index bdb07845968..e3ffe62bfd1 100644
--- a/consensus/types/src/sync_duty.rs
+++ b/consensus/types/src/sync_duty.rs
@@ -7,9 +7,9 @@ use std::collections::HashSet;
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct SyncDuty {
     pub pubkey: PublicKeyBytes,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub validator_index: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64_vec")]
+    #[serde(with = "serde_utils::quoted_u64_vec")]
     pub validator_sync_committee_indices: Vec<u64>,
 }
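`quoted_u64_vec` is the list counterpart used by the two structs above: every element becomes a decimal string on the wire. A sketch under the same `ethereum_serde_utils` 0.5 assumption; `Duties` is illustrative:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Duties {
    #[serde(with = "serde_utils::quoted_u64_vec")]
    indices: Vec<u64>,
}

fn main() {
    let json = serde_json::to_string(&Duties { indices: vec![0, 3] }).unwrap();
    assert_eq!(json, r#"{"indices":["0","3"]}"#);
}
```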
diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs
index 570abace1eb..7cae3946c6b 100644
--- a/consensus/types/src/sync_selection_proof.rs
+++ b/consensus/types/src/sync_selection_proof.rs
@@ -5,7 +5,7 @@ use crate::{
     ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot,
     SyncAggregatorSelectionData,
 };
-use eth2_hashing::hash;
+use ethereum_hashing::hash;
 use safe_arith::{ArithError, SafeArith};
 use ssz::Encode;
 use ssz_types::typenum::Unsigned;
diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs
index 11bcf268941..5af756ae013 100644
--- a/consensus/types/src/sync_subnet_id.rs
+++ b/consensus/types/src/sync_subnet_id.rs
@@ -21,7 +21,7 @@ lazy_static! {
 
 #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 #[serde(transparent)]
-pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64);
+pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);
 
 pub fn sync_subnet_id_to_string(i: u64) -> &'static str {
     if i < SYNC_COMMITTEE_SUBNET_COUNT {
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs
index 43b892cdf3d..6860397fb5b 100644
--- a/consensus/types/src/validator.rs
+++ b/consensus/types/src/validator.rs
@@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash;
 pub struct Validator {
     pub pubkey: PublicKeyBytes,
     pub withdrawal_credentials: Hash256,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub effective_balance: u64,
     pub slashed: bool,
     pub activation_eligibility_epoch: Epoch,
diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs
index 5a3450df081..de7f26cc632 100644
--- a/consensus/types/src/validator_registration_data.rs
+++ b/consensus/types/src/validator_registration_data.rs
@@ -13,9 +13,9 @@ pub struct SignedValidatorRegistrationData {
 #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)]
 pub struct ValidatorRegistrationData {
     pub fee_recipient: Address,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub gas_limit: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub timestamp: u64,
     pub pubkey: PublicKeyBytes,
 }
diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs
index 20c84986c29..02686fef9ad 100644
--- a/consensus/types/src/voluntary_exit.rs
+++ b/consensus/types/src/voluntary_exit.rs
@@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash;
 pub struct VoluntaryExit {
     /// Earliest epoch when voluntary exit can be processed.
     pub epoch: Epoch,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub validator_index: u64,
 }
diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs
index 5221ff63f09..eed7c7e277f 100644
--- a/consensus/types/src/withdrawal.rs
+++ b/consensus/types/src/withdrawal.rs
@@ -20,12 +20,12 @@ use tree_hash_derive::TreeHash;
     TestRandom,
 )]
 pub struct Withdrawal {
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub index: u64,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub validator_index: u64,
     pub address: Address,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    #[serde(with = "serde_utils::quoted_u64")]
     pub amount: u64,
 }
diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml
index c3331824d9e..a610f257cdb 100644
--- a/crypto/bls/Cargo.toml
+++ b/crypto/bls/Cargo.toml
@@ -5,15 +5,15 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2021"
 
 [dependencies]
-eth2_ssz = "0.4.1"
-tree_hash = "0.4.1"
+ethereum_ssz = "0.5.0"
+tree_hash = "0.5.0"
 milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true }
 rand = "0.7.3"
 serde = "1.0.116"
 serde_derive = "1.0.116"
-eth2_serde_utils = "0.1.1"
+ethereum_serde_utils = "0.5.0"
 hex = "0.4.2"
-eth2_hashing = "0.3.0"
+ethereum_hashing = "1.0.0-beta.2"
 ethereum-types = "0.14.1"
 arbitrary = { version = "1.0", features = ["derive"], optional = true }
 zeroize = { version = "1.4.2", features = ["zeroize_derive"] }
diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs
index a61529af250..e6e53253f64 100644
--- a/crypto/bls/src/generic_aggregate_signature.rs
+++ b/crypto/bls/src/generic_aggregate_signature.rs
@@ -4,9 +4,9 @@ use crate::{
     generic_signature::{GenericSignature, TSignature},
     Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN,
 };
-use eth2_serde_utils::hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
+use serde_utils::hex::encode as hex_encode;
 use ssz::{Decode, Encode};
 use std::fmt;
 use std::hash::{Hash, Hasher};
diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs
index 847d039c62b..462e4cb2cb0 100644
--- a/crypto/bls/src/generic_public_key.rs
+++ b/crypto/bls/src/generic_public_key.rs
@@ -1,8 +1,8 @@
 use crate::generic_public_key_bytes::GenericPublicKeyBytes;
 use crate::Error;
-use eth2_serde_utils::hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
+use serde_utils::hex::encode as hex_encode;
 use ssz::{Decode, Encode};
 use std::fmt;
 use std::hash::{Hash, Hasher};
diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs
index c2f318ab65d..59b0ffc43f1 100644
--- a/crypto/bls/src/generic_public_key_bytes.rs
+++ b/crypto/bls/src/generic_public_key_bytes.rs
@@ -2,9 +2,9 @@ use crate::{
     generic_public_key::{GenericPublicKey, TPublicKey},
     Error, PUBLIC_KEY_BYTES_LEN,
 };
-use eth2_serde_utils::hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
+use serde_utils::hex::encode as hex_encode;
 use ssz::{Decode, Encode};
 use std::convert::TryInto;
 use std::fmt;
diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs
index 01e5ed1d481..05e0a222bd5 100644
--- a/crypto/bls/src/generic_signature.rs
+++ b/crypto/bls/src/generic_signature.rs
@@ -2,9 +2,9 @@ use crate::{
     generic_public_key::{GenericPublicKey, TPublicKey},
     Error, Hash256,
 };
-use eth2_serde_utils::hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
+use serde_utils::hex::encode as hex_encode;
 use ssz::{Decode, Encode};
 use std::fmt;
 use std::hash::{Hash, Hasher};
diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs
index aa33c90d0c3..8f9f2a4d88e 100644
--- a/crypto/bls/src/generic_signature_bytes.rs
+++ b/crypto/bls/src/generic_signature_bytes.rs
@@ -3,9 +3,9 @@ use crate::{
     generic_signature::{GenericSignature, TSignature},
     Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN,
 };
-use eth2_serde_utils::hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
+use serde_utils::hex::encode as hex_encode;
 use ssz::{Decode, Encode};
 use std::convert::TryInto;
 use std::fmt;
diff --git a/crypto/bls/src/get_withdrawal_credentials.rs b/crypto/bls/src/get_withdrawal_credentials.rs
index 98106434f14..d5e6470504a 100644
--- a/crypto/bls/src/get_withdrawal_credentials.rs
+++ b/crypto/bls/src/get_withdrawal_credentials.rs
@@ -1,5 +1,5 @@
 use crate::PublicKey;
-use eth2_hashing::hash;
+use ethereum_hashing::hash;
 use ssz::Encode;
 
 /// Returns the withdrawal credentials for a given public key.
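For context on the helper whose import just changed: BLS withdrawal credentials are a prefix byte followed by the tail of `sha256(pubkey)`. A hedged sketch using the new crate name; the zeroed 48-byte pubkey is a placeholder, not a real key:

```rust
use ethereum_hashing::hash;

fn main() {
    let pubkey_ssz_bytes = [0u8; 48]; // placeholder for a real BLS pubkey
    let mut credentials = hash(&pubkey_ssz_bytes);
    credentials[0] = 0x00; // BLS withdrawal prefix byte
    assert_eq!(credentials.len(), 32);
}
```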
diff --git a/crypto/eth2_hashing/.cargo/config b/crypto/eth2_hashing/.cargo/config
deleted file mode 100644
index 4ec2f3b8620..00000000000
--- a/crypto/eth2_hashing/.cargo/config
+++ /dev/null
@@ -1,2 +0,0 @@
-[target.wasm32-unknown-unknown]
-runner = 'wasm-bindgen-test-runner'
diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml
deleted file mode 100644
index db296c70fe9..00000000000
--- a/crypto/eth2_hashing/Cargo.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-[package]
-name = "eth2_hashing"
-version = "0.3.0"
-authors = ["Paul Hauner <paul@paulhauner.com>"]
-edition = "2021"
-license = "Apache-2.0"
-description = "Hashing primitives used in Ethereum 2.0"
-
-[dependencies]
-lazy_static = { version = "1.4.0", optional = true }
-cpufeatures = { version = "0.2.5", optional = true }
-ring = "0.16.19"
-sha2 = "0.10"
-
-[dev-dependencies]
-rustc-hex = "2.1.0"
-
-[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
-wasm-bindgen-test = "0.3.18"
-
-[features]
-default = ["zero_hash_cache", "detect-cpufeatures"]
-zero_hash_cache = ["lazy_static"]
-detect-cpufeatures = ["cpufeatures"]
diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs
deleted file mode 100644
index 36a3d141391..00000000000
--- a/crypto/eth2_hashing/src/lib.rs
+++ /dev/null
@@ -1,251 +0,0 @@
-//! Optimized SHA256 for use in Ethereum 2.0.
-//!
-//! The initial purpose of this crate was to provide an abstraction over the hash function used in
-//! Ethereum 2.0. The hash function changed during the specification process, so defining it once in
-//! this crate made it easy to replace.
-//!
-//! Now this crate serves primarily as a wrapper over two SHA256 crates: `sha2` and `ring` –
-//! which it switches between at runtime based on the availability of SHA intrinsics.
-
-pub use self::DynamicContext as Context;
-use sha2::Digest;
-
-#[cfg(feature = "zero_hash_cache")]
-use lazy_static::lazy_static;
-
-/// Length of a SHA256 hash in bytes.
-pub const HASH_LEN: usize = 32;
-
-/// Returns the digest of `input` using the best available implementation.
-pub fn hash(input: &[u8]) -> Vec<u8> {
-    DynamicImpl::best().hash(input)
-}
-
-/// Hash function returning a fixed-size array (to save on allocations).
-///
-/// Uses the best available implementation based on CPU features.
-pub fn hash_fixed(input: &[u8]) -> [u8; HASH_LEN] {
-    DynamicImpl::best().hash_fixed(input)
-}
-
-/// Compute the hash of two slices concatenated.
-pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] {
-    let mut ctxt = DynamicContext::new();
-    ctxt.update(h1);
-    ctxt.update(h2);
-    ctxt.finalize()
-}
-
-/// Context trait for abstracting over implementation contexts.
-pub trait Sha256Context {
-    fn new() -> Self;
-
-    fn update(&mut self, bytes: &[u8]);
-
-    fn finalize(self) -> [u8; HASH_LEN];
-}
-
-/// Top-level trait implemented by both `sha2` and `ring` implementations.
-pub trait Sha256 {
-    type Context: Sha256Context;
-
-    fn hash(&self, input: &[u8]) -> Vec<u8>;
-
-    fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN];
-}
-
-/// Implementation of SHA256 using the `sha2` crate (fastest on CPUs with SHA extensions).
-struct Sha2CrateImpl;
-
-impl Sha256Context for sha2::Sha256 {
-    fn new() -> Self {
-        sha2::Digest::new()
-    }
-
-    fn update(&mut self, bytes: &[u8]) {
-        sha2::Digest::update(self, bytes)
-    }
-
-    fn finalize(self) -> [u8; HASH_LEN] {
-        sha2::Digest::finalize(self).into()
-    }
-}
-
-impl Sha256 for Sha2CrateImpl {
-    type Context = sha2::Sha256;
-
-    fn hash(&self, input: &[u8]) -> Vec<u8> {
-        Self::Context::digest(input).into_iter().collect()
-    }
-
-    fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] {
-        Self::Context::digest(input).into()
-    }
-}
-
-/// Implementation of SHA256 using the `ring` crate (fastest on CPUs without SHA extensions).
-pub struct RingImpl;
-
-impl Sha256Context for ring::digest::Context {
-    fn new() -> Self {
-        Self::new(&ring::digest::SHA256)
-    }
-
-    fn update(&mut self, bytes: &[u8]) {
-        self.update(bytes)
-    }
-
-    fn finalize(self) -> [u8; HASH_LEN] {
-        let mut output = [0; HASH_LEN];
-        output.copy_from_slice(self.finish().as_ref());
-        output
-    }
-}
-
-impl Sha256 for RingImpl {
-    type Context = ring::digest::Context;
-
-    fn hash(&self, input: &[u8]) -> Vec<u8> {
-        ring::digest::digest(&ring::digest::SHA256, input)
-            .as_ref()
-            .into()
-    }
-
-    fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] {
-        let mut ctxt = Self::Context::new(&ring::digest::SHA256);
-        ctxt.update(input);
-        ctxt.finalize()
-    }
-}
-
-/// Default dynamic implementation that switches between available implementations.
-pub enum DynamicImpl {
-    Sha2,
-    Ring,
-}
-
-// Runtime latch for detecting the availability of SHA extensions on x86_64.
-//
-// Inspired by the runtime switch within the `sha2` crate itself.
-#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))]
-cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1");
-
-#[inline(always)]
-pub fn have_sha_extensions() -> bool {
-    #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))]
-    return x86_sha_extensions::get();
-
-    #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))]
-    return false;
-}
-
-impl DynamicImpl {
-    /// Choose the best available implementation based on the currently executing CPU.
-    #[inline(always)]
-    pub fn best() -> Self {
-        if have_sha_extensions() {
-            Self::Sha2
-        } else {
-            Self::Ring
-        }
-    }
-}
-
-impl Sha256 for DynamicImpl {
-    type Context = DynamicContext;
-
-    #[inline(always)]
-    fn hash(&self, input: &[u8]) -> Vec<u8> {
-        match self {
-            Self::Sha2 => Sha2CrateImpl.hash(input),
-            Self::Ring => RingImpl.hash(input),
-        }
-    }
-
-    #[inline(always)]
-    fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] {
-        match self {
-            Self::Sha2 => Sha2CrateImpl.hash_fixed(input),
-            Self::Ring => RingImpl.hash_fixed(input),
-        }
-    }
-}
-
-/// Context encapsulating all implementation contexts.
-///
-/// This enum ends up being 8 bytes larger than the largest inner context.
-pub enum DynamicContext {
-    Sha2(sha2::Sha256),
-    Ring(ring::digest::Context),
-}
-
-impl Sha256Context for DynamicContext {
-    fn new() -> Self {
-        match DynamicImpl::best() {
-            DynamicImpl::Sha2 => Self::Sha2(Sha256Context::new()),
-            DynamicImpl::Ring => Self::Ring(Sha256Context::new()),
-        }
-    }
-
-    fn update(&mut self, bytes: &[u8]) {
-        match self {
-            Self::Sha2(ctxt) => Sha256Context::update(ctxt, bytes),
-            Self::Ring(ctxt) => Sha256Context::update(ctxt, bytes),
-        }
-    }
-
-    fn finalize(self) -> [u8; HASH_LEN] {
-        match self {
-            Self::Sha2(ctxt) => Sha256Context::finalize(ctxt),
-            Self::Ring(ctxt) => Sha256Context::finalize(ctxt),
-        }
-    }
-}
-
-/// The max index that can be used with `ZERO_HASHES`.
-#[cfg(feature = "zero_hash_cache")]
-pub const ZERO_HASHES_MAX_INDEX: usize = 48;
-
-#[cfg(feature = "zero_hash_cache")]
-lazy_static! {
-    /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves.
-    pub static ref ZERO_HASHES: Vec<Vec<u8>> = {
-        let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1];
-
-        for i in 0..ZERO_HASHES_MAX_INDEX {
-            hashes[i + 1] = hash32_concat(&hashes[i], &hashes[i])[..].to_vec();
-        }
-
-        hashes
-    };
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use rustc_hex::FromHex;
-
-    #[cfg(target_arch = "wasm32")]
-    use wasm_bindgen_test::*;
-
-    #[cfg_attr(not(target_arch = "wasm32"), test)]
-    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
-    fn test_hashing() {
-        let input: Vec<u8> = b"hello world".as_ref().into();
-
-        let output = hash(input.as_ref());
-        let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";
-        let expected: Vec<u8> = expected_hex.from_hex().unwrap();
-        assert_eq!(expected, output);
-    }
-
-    #[cfg(feature = "zero_hash_cache")]
-    mod zero_hash {
-        use super::*;
-
-        #[test]
-        fn zero_hash_zero() {
-            assert_eq!(ZERO_HASHES[0], vec![0; 32]);
-        }
-    }
-}
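The crate deleted above lives on externally as `ethereum_hashing` 1.0.0-beta.2, and the import renames throughout this diff suggest the public surface carries over. A small sketch exercising that surface, assuming the API is unchanged:

```rust
use ethereum_hashing::{hash, hash32_concat, have_sha_extensions, ZERO_HASHES};

fn main() {
    // Same dynamic dispatch as the deleted code: sha2 with SHA-NI, ring otherwise.
    println!("SHA extensions available: {}", have_sha_extensions());

    // `ZERO_HASHES[i + 1]` is the hash of two copies of `ZERO_HASHES[i]`.
    let level1 = hash32_concat(&ZERO_HASHES[0], &ZERO_HASHES[0]);
    assert_eq!(ZERO_HASHES[1], level1.to_vec());

    // Plain one-shot hashing returns a 32-byte digest.
    assert_eq!(hash(b"hello world").len(), 32);
}
```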
features = ["insecure_keys"] } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index feda81d0302..98f33f21536 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.65.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index 56f18f99885..47c2c7addf0 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -2,9 +2,8 @@ use clap::ArgMatches; use clap_utils::{parse_required, parse_ssz_required}; use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; use tree_hash::TreeHash; -use types::EthSpec; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches) -> Result<(), String> { let rlp_bytes = parse_ssz_required::>(matches, "deposit-data")?; let amount = parse_required(matches, "deposit-amount")?; diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 6f39392d121..8662a804761 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -3,15 +3,14 @@ use lighthouse_network::{ discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs; use std::fs::File; use std::io::Write; -use std::net::IpAddr; use std::path::PathBuf; +use std::{fs, net::Ipv4Addr}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches) -> Result<(), String> { - let ip: IpAddr = clap_utils::parse_required(matches, "ip")?; + let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; @@ -25,12 +24,10 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { )); } - let config = NetworkConfig { - enr_address: Some(ip), - enr_udp_port: Some(udp_port), - enr_tcp_port: Some(tcp_port), - ..Default::default() - }; + let mut config = NetworkConfig::default(); + config.enr_address = (Some(ip), None); + config.enr_udp4_port = Some(udp_port); + config.enr_tcp6_port = Some(tcp_port); let local_keypair = Keypair::generate_secp256k1(); let enr_key = CombinedKey::from_libp2p(&local_keypair)?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index acde9bd72ae..fe49814bfe4 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -848,7 +848,7 @@ fn run( } ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run new_testnet command: {}", e)), - ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) + ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9360c960083..2afad1b5823 100644 --- a/lighthouse/Cargo.toml +++ 
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index 9360c960083..2afad1b5823 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -1,10 +1,10 @@
 [package]
 name = "lighthouse"
-version = "3.5.1"
+version = "4.1.0"
 authors = ["Sigma Prime "]
 edition = "2021"
 autotests = false
-rust-version = "1.65"
+rust-version = "1.68.2"

 [features]
 default = ["slasher-mdbx"]
@@ -33,7 +33,7 @@
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 sloggers = { version = "2.1.1", features = ["json"] }
 types = { "path" = "../consensus/types" }
 bls = { path = "../crypto/bls" }
-eth2_hashing = "0.3.0"
+ethereum_hashing = "1.0.0-beta.2"
 clap = "2.33.3"
 env_logger = "0.9.0"
 environment = { path = "./environment" }
diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs
index 3da006c86d2..1845113d8ce 100644
--- a/lighthouse/src/main.rs
+++ b/lighthouse/src/main.rs
@@ -6,8 +6,8 @@
 use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config};
 use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR};
 use env_logger::{Builder, Env};
 use environment::{EnvironmentBuilder, LoggerConfig};
-use eth2_hashing::have_sha_extensions;
 use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES};
+use ethereum_hashing::have_sha_extensions;
 use lighthouse_version::VERSION;
 use malloc_utils::configure_memory_allocator;
 use slog::{crit, info, warn};
@@ -152,7 +152,8 @@ fn main() {
                 .help(
                     "If present, log files will be generated as world-readable meaning they can be read by \
                     any user on the machine. Note that logs can often contain sensitive information \
-                    about your validator and so this flag should be used with caution.")
+                    about your validator and so this flag should be used with caution. For Windows users, \
+                    the log file permissions will be inherited from the parent folder.")
                 .global(true),
         )
         .arg(
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 7f957b626db..9dd67eadc60 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -1,14 +1,15 @@
-use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config};
+use beacon_node::ClientConfig as Config;

 use crate::exec::{CommandLineTestExec, CompletedTest};
 use beacon_node::beacon_chain::chain_config::{
+    DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR,
     DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
 };
 use eth1::Eth1Endpoint;
 use lighthouse_network::PeerId;
 use std::fs::File;
 use std::io::{Read, Write};
-use std::net::IpAddr;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::path::PathBuf;
 use std::process::Command;
 use std::str::FromStr;
@@ -16,7 +17,7 @@
 use std::string::ToString;
 use std::time::Duration;
 use tempfile::TempDir;
 use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec};
-use unused_port::{unused_tcp_port, unused_udp_port};
+use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port};

 const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";

@@ -118,6 +119,26 @@ fn disable_lock_timeouts_flag() {
         .with_config(|config| assert!(!config.chain.enable_lock_timeouts));
 }

+#[test]
+fn shuffling_cache_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.shuffling_cache_size,
+                beacon_node::beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE
+            )
+        });
+}
+
+#[test]
+fn shuffling_cache_set() {
+    CommandLineTest::new()
+        .flag("shuffling-cache-size", Some("500"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500));
+}
+
 #[test]
 fn fork_choice_before_proposal_timeout_default() {
     CommandLineTest::new()
@@ -212,74 +233,58 @@ fn paranoid_block_proposal_on() {
         .with_config(|config| assert!(config.chain.paranoid_block_proposal));
 }

-#[test]
-fn count_unrealized_default() {
-    CommandLineTest::new()
-        .run_with_zero_port()
-        .with_config(|config| assert!(config.chain.count_unrealized));
-}
-
 #[test]
 fn count_unrealized_no_arg() {
     CommandLineTest::new()
         .flag("count-unrealized", None)
-        .run_with_zero_port()
-        .with_config(|config| assert!(config.chain.count_unrealized));
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
 fn count_unrealized_false() {
     CommandLineTest::new()
         .flag("count-unrealized", Some("false"))
-        .run_with_zero_port()
-        .with_config(|config| assert!(!config.chain.count_unrealized));
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
 fn count_unrealized_true() {
     CommandLineTest::new()
         .flag("count-unrealized", Some("true"))
-        .run_with_zero_port()
-        .with_config(|config| assert!(config.chain.count_unrealized));
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
 fn count_unrealized_full_no_arg() {
     CommandLineTest::new()
         .flag("count-unrealized-full", None)
-        .run_with_zero_port()
-        .with_config(|config| {
-            assert_eq!(
-                config.chain.count_unrealized_full,
-                CountUnrealizedFull::False
-            )
-        });
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
 fn count_unrealized_full_false() {
     CommandLineTest::new()
         .flag("count-unrealized-full", Some("false"))
-        .run_with_zero_port()
-        .with_config(|config| {
-            assert_eq!(
-                config.chain.count_unrealized_full,
-                CountUnrealizedFull::False
-            )
-        });
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
 fn count_unrealized_full_true() {
     CommandLineTest::new()
         .flag("count-unrealized-full", Some("true"))
-        .run_with_zero_port()
-        .with_config(|config| {
-            assert_eq!(
-                config.chain.count_unrealized_full,
-                CountUnrealizedFull::True
-            )
-        });
+        // This flag should be ignored, so there's nothing to test but that the
+        // client starts with the flag present.
+        .run_with_zero_port();
 }

 #[test]
@@ -711,6 +716,40 @@ fn builder_fallback_flags() {
     );
 }

+#[test]
+fn builder_user_agent() {
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        None,
+        None,
+        |config| {
+            assert_eq!(
+                config.execution_layer.as_ref().unwrap().builder_user_agent,
+                None
+            );
+        },
+    );
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        Some("builder-user-agent"),
+        Some("anon"),
+        |config| {
+            assert_eq!(
+                config
+                    .execution_layer
+                    .as_ref()
+                    .unwrap()
+                    .builder_user_agent
+                    .as_ref()
+                    .unwrap(),
+                "anon"
+            );
+        },
+    );
+}
+
 fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) {
     use sensitive_url::SensitiveUrl;
@@ -851,37 +890,188 @@ fn network_shutdown_after_sync_disabled_flag() {
         .with_config(|config| assert!(!config.network.shutdown_after_sync));
 }
 #[test]
-fn network_listen_address_flag() {
-    let addr = "127.0.0.2".parse::<IpAddr>().unwrap();
+fn network_listen_address_flag_v4() {
+    let addr = "127.0.0.2".parse::<Ipv4Addr>().unwrap();
     CommandLineTest::new()
         .flag("listen-address", Some("127.0.0.2"))
         .run_with_zero_port()
-        .with_config(|config| assert_eq!(config.network.listen_address, addr));
+        .with_config(|config| {
+            assert_eq!(
+                config.network.listen_addrs().v4().map(|addr| addr.addr),
+                Some(addr)
+            )
+        });
+}
+#[test]
+fn network_listen_address_flag_v6() {
+    const ADDR: &str = "::1";
+    let addr = ADDR.parse::<Ipv6Addr>().unwrap();
+    CommandLineTest::new()
+        .flag("listen-address", Some(ADDR))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.network.listen_addrs().v6().map(|addr| addr.addr),
+                Some(addr)
+            )
+        });
+}
+#[test]
+fn network_listen_address_flag_dual_stack() {
+    const V4_ADDR: &str = "127.0.0.1";
+    const V6_ADDR: &str = "::1";
+    let ipv6_addr = V6_ADDR.parse::<Ipv6Addr>().unwrap();
+    let ipv4_addr = V4_ADDR.parse::<Ipv4Addr>().unwrap();
+    CommandLineTest::new()
+        .flag("listen-address", Some(V6_ADDR))
+        .flag("listen-address", Some(V4_ADDR))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.network.listen_addrs().v6().map(|addr| addr.addr),
+                Some(ipv6_addr)
+            );
+            assert_eq!(
+                config.network.listen_addrs().v4().map(|addr| addr.addr),
+                Some(ipv4_addr)
+            )
+        });
+}
+#[test]
+#[should_panic]
+fn network_listen_address_flag_wrong_double_v4_value_config() {
+    // It's actually possible to listen over multiple sockets in libp2p over the same ip version.
+    // However, this is not compatible with the single contactable address over each version in ENR.
+    // Because of this, it's important to test this is disallowed.
+    const V4_ADDR1: &str = "127.0.0.1";
+    const V4_ADDR2: &str = "0.0.0.0";
+    CommandLineTest::new()
+        .flag("listen-address", Some(V4_ADDR1))
+        .flag("listen-address", Some(V4_ADDR2))
+        .run_with_zero_port();
+}
+#[test]
+#[should_panic]
+fn network_listen_address_flag_wrong_double_v6_value_config() {
+    // It's actually possible to listen over multiple sockets in libp2p over the same ip version.
+    // However, this is not compatible with the single contactable address over each version in ENR.
+    // Because of this, it's important to test this is disallowed.
+    const V6_ADDR1: &str = "::3";
+    const V6_ADDR2: &str = "::1";
+    CommandLineTest::new()
+        .flag("listen-address", Some(V6_ADDR1))
+        .flag("listen-address", Some(V6_ADDR2))
+        .run_with_zero_port();
+}
+#[test]
+fn network_port_flag_over_ipv4() {
+    let port = unused_tcp4_port().expect("Unable to find unused port.");
+    CommandLineTest::new()
+        .flag("port", Some(port.to_string().as_str()))
+        .run()
+        .with_config(|config| {
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v4()
+                    .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)),
+                Some((port, port))
+            );
+        });
 }
 #[test]
-fn network_port_flag() {
-    let port = unused_tcp_port().expect("Unable to find unused port.");
+fn network_port_flag_over_ipv6() {
+    let port = unused_tcp6_port().expect("Unable to find unused port.");
     CommandLineTest::new()
+        .flag("listen-address", Some("::1"))
         .flag("port", Some(port.to_string().as_str()))
         .run()
         .with_config(|config| {
-            assert_eq!(config.network.libp2p_port, port);
-            assert_eq!(config.network.discovery_port, port);
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v6()
+                    .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)),
+                Some((port, port))
+            );
+        });
+}
+#[test]
+fn network_port_and_discovery_port_flags_over_ipv4() {
+    let tcp4_port = unused_tcp4_port().expect("Unable to find unused port.");
+    let udp4_port = unused_udp4_port().expect("Unable to find unused port.");
+    CommandLineTest::new()
+        .flag("port", Some(tcp4_port.to_string().as_str()))
+        .flag("discovery-port", Some(udp4_port.to_string().as_str()))
+        .run()
+        .with_config(|config| {
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v4()
+                    .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)),
+                Some((tcp4_port, udp4_port))
+            );
+        });
+}
+#[test]
+fn network_port_and_discovery_port_flags_over_ipv6() {
+    let tcp6_port = unused_tcp6_port().expect("Unable to find unused port.");
+    let udp6_port = unused_udp6_port().expect("Unable to find unused port.");
+    CommandLineTest::new()
+        .flag("listen-address", Some("::1"))
+        .flag("port", Some(tcp6_port.to_string().as_str()))
+        .flag("discovery-port", Some(udp6_port.to_string().as_str()))
+        .run()
+        .with_config(|config| {
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v6()
+                    .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)),
+                Some((tcp6_port, udp6_port))
+            );
         });
 }
 #[test]
-fn network_port_and_discovery_port_flags() {
-    let port1 = unused_tcp_port().expect("Unable to find unused port.");
-    let port2 = unused_udp_port().expect("Unable to find unused port.");
+fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() {
+    let tcp4_port = unused_tcp4_port().expect("Unable to find unused port.");
+    let udp4_port = unused_udp4_port().expect("Unable to find unused port.");
+    let tcp6_port = unused_tcp6_port().expect("Unable to find unused port.");
+    let udp6_port = unused_udp6_port().expect("Unable to find unused port.");
     CommandLineTest::new()
-        .flag("port", Some(port1.to_string().as_str()))
-        .flag("discovery-port", Some(port2.to_string().as_str()))
+        .flag("listen-address", Some("::1"))
+        .flag("listen-address", Some("127.0.0.1"))
+        .flag("port", Some(tcp4_port.to_string().as_str()))
+        .flag("discovery-port", Some(udp4_port.to_string().as_str()))
+        .flag("port6", Some(tcp6_port.to_string().as_str()))
+        .flag("discovery-port6", Some(udp6_port.to_string().as_str()))
         .run()
         .with_config(|config| {
-            assert_eq!(config.network.libp2p_port, port1);
-            assert_eq!(config.network.discovery_port, port2);
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v4()
+                    .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)),
+                Some((tcp4_port, udp4_port))
+            );
+
+            assert_eq!(
+                config
+                    .network
+                    .listen_addrs()
+                    .v6()
+                    .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)),
+                Some((tcp6_port, udp6_port))
+            );
         });
 }
+
 #[test]
 fn disable_discovery_flag() {
     CommandLineTest::new()
@@ -890,6 +1080,13 @@ fn disable_discovery_flag() {
         .with_config(|config| assert!(config.network.disable_discovery));
 }
 #[test]
+fn disable_peer_scoring_flag() {
+    CommandLineTest::new()
+        .flag("disable-peer-scoring", None)
+        .run_with_zero_port()
+        .with_config(|config| assert!(config.network.disable_peer_scoring));
+}
+#[test]
 fn disable_upnp_flag() {
     CommandLineTest::new()
         .flag("disable-upnp", None)
@@ -897,6 +1094,19 @@
         .with_config(|config| assert!(!config.network.upnp_enabled));
 }
 #[test]
+fn disable_backfill_rate_limiting_flag() {
+    CommandLineTest::new()
+        .flag("disable-backfill-rate-limiting", None)
+        .run_with_zero_port()
+        .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting));
+}
+#[test]
+fn default_backfill_rate_limiting_flag() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting));
+}
+#[test]
 fn default_boot_nodes() {
     let mainnet = vec![
         // Lighthouse Team (Sigma Prime)
@@ -986,7 +1196,6 @@ fn zero_ports_flag() {
     CommandLineTest::new()
         .run_with_zero_port()
         .with_config(|config| {
-            assert_eq!(config.network.enr_address, None);
             assert_eq!(config.http_api.listen_port, 0);
             assert_eq!(config.http_metrics.listen_port, 0);
         });
@@ -1003,67 +1212,171 @@ fn network_load_flag() {

 // Tests for ENR flags.
 #[test]
-fn enr_udp_port_flags() {
-    let port = unused_udp_port().expect("Unable to find unused port.");
+fn enr_udp_port_flag() {
+    let port = unused_udp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("enr-udp-port", Some(port.to_string().as_str()))
         .run_with_zero_port()
-        .with_config(|config| assert_eq!(config.network.enr_udp_port, Some(port)));
+        .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port)));
 }
 #[test]
-fn enr_tcp_port_flags() {
-    let port = unused_tcp_port().expect("Unable to find unused port.");
+fn enr_tcp_port_flag() {
+    let port = unused_tcp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("enr-tcp-port", Some(port.to_string().as_str()))
         .run_with_zero_port()
-        .with_config(|config| assert_eq!(config.network.enr_tcp_port, Some(port)));
+        .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port)));
+}
+#[test]
+fn enr_udp6_port_flag() {
+    let port = unused_udp6_port().expect("Unable to find unused port.");
+    CommandLineTest::new()
+        .flag("enr-udp6-port", Some(port.to_string().as_str()))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port)));
 }
 #[test]
-fn enr_match_flag() {
-    let addr = "127.0.0.2".parse::<IpAddr>().unwrap();
-    let port1 = unused_udp_port().expect("Unable to find unused port.");
-    let port2 = unused_udp_port().expect("Unable to find unused port.");
+fn enr_tcp6_port_flag() {
+    let port = unused_tcp6_port().expect("Unable to find unused port.");
+    CommandLineTest::new()
+        .flag("enr-tcp6-port", Some(port.to_string().as_str()))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.network.enr_tcp6_port, Some(port)));
+}
+#[test]
+fn enr_match_flag_over_ipv4() {
+    let addr = "127.0.0.2".parse::<Ipv4Addr>().unwrap();
unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) - .flag("discovery-port", Some(port1.to_string().as_str())) - .flag("port", Some(port2.to_string().as_str())) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("port", Some(tcp4_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((addr, udp4_port, tcp4_port)) + ); + assert_eq!(config.network.enr_address, (Some(addr), None)); + assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + }); +} +#[test] +fn enr_match_flag_over_ipv6() { + const ADDR: &str = "::1"; + let addr = ADDR.parse::().unwrap(); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-match", None) + .flag("listen-address", Some(ADDR)) + .flag("discovery-port", Some(udp6_port.to_string().as_str())) + .flag("port", Some(tcp6_port.to_string().as_str())) .run() .with_config(|config| { - assert_eq!(config.network.listen_address, addr); - assert_eq!(config.network.enr_address, Some(addr)); - assert_eq!(config.network.discovery_port, port1); - assert_eq!(config.network.enr_udp_port, Some(port1)); + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((addr, udp6_port, tcp6_port)) + ); + assert_eq!(config.network.enr_address, (None, Some(addr))); + assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + }); +} +#[test] +fn enr_match_flag_over_ipv4_and_ipv6() { + const IPV6_ADDR: &str = "::1"; + let ipv6_addr = IPV6_ADDR.parse::().unwrap(); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + let ipv4_addr = IPV4_ADDR.parse::().unwrap(); + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-match", None) + .flag("listen-address", Some(IPV4_ADDR)) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("port", Some(tcp4_port.to_string().as_str())) + .flag("listen-address", Some(IPV6_ADDR)) + .flag("discovery-port6", Some(udp6_port.to_string().as_str())) + .flag("port6", Some(tcp6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((ipv6_addr, udp6_port, tcp6_port)) + ); + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((ipv4_addr, udp4_port, tcp4_port)) + ); + assert_eq!( + config.network.enr_address, + (Some(ipv4_addr), Some(ipv6_addr)) + ); + assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + }); +} +#[test] +fn enr_address_flag_with_ipv4() { + let addr = "192.167.1.1".parse::().unwrap(); + let port = unused_udp4_port().expect("Unable to find unused port."); + 
+    CommandLineTest::new()
+        .flag("enr-address", Some("192.167.1.1"))
+        .flag("enr-udp-port", Some(port.to_string().as_str()))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.network.enr_address, (Some(addr), None));
+            assert_eq!(config.network.enr_udp4_port, Some(port));
         });
 }
 #[test]
-fn enr_address_flag() {
-    let addr = "192.167.1.1".parse::<IpAddr>().unwrap();
-    let port = unused_udp_port().expect("Unable to find unused port.");
+fn enr_address_flag_with_ipv6() {
+    let addr = "192.167.1.1".parse::<Ipv4Addr>().unwrap();
+    let port = unused_udp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("enr-address", Some("192.167.1.1"))
         .flag("enr-udp-port", Some(port.to_string().as_str()))
         .run_with_zero_port()
         .with_config(|config| {
-            assert_eq!(config.network.enr_address, Some(addr));
-            assert_eq!(config.network.enr_udp_port, Some(port));
+            assert_eq!(config.network.enr_address, (Some(addr), None));
+            assert_eq!(config.network.enr_udp4_port, Some(port));
         });
 }
 #[test]
 fn enr_address_dns_flag() {
-    let addr = "127.0.0.1".parse::<IpAddr>().unwrap();
-    let ipv6addr = "::1".parse::<IpAddr>().unwrap();
-    let port = unused_udp_port().expect("Unable to find unused port.");
+    let addr = Ipv4Addr::LOCALHOST;
+    let ipv6addr = Ipv6Addr::LOCALHOST;
+    let port = unused_udp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("enr-address", Some("localhost"))
         .flag("enr-udp-port", Some(port.to_string().as_str()))
         .run_with_zero_port()
         .with_config(|config| {
             assert!(
-                config.network.enr_address == Some(addr)
-                    || config.network.enr_address == Some(ipv6addr)
+                config.network.enr_address.0 == Some(addr)
+                    || config.network.enr_address.1 == Some(ipv6addr)
             );
-            assert_eq!(config.network.enr_udp_port, Some(port));
+            assert_eq!(config.network.enr_udp4_port, Some(port));
         });
 }
 #[test]
@@ -1100,8 +1413,8 @@ fn http_address_ipv6_flag() {
 }
 #[test]
 fn http_port_flag() {
-    let port1 = unused_tcp_port().expect("Unable to find unused port.");
-    let port2 = unused_tcp_port().expect("Unable to find unused port.");
+    let port1 = unused_tcp4_port().expect("Unable to find unused port.");
+    let port2 = unused_tcp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("http-port", Some(port1.to_string().as_str()))
         .flag("port", Some(port2.to_string().as_str()))
@@ -1215,8 +1528,8 @@ fn metrics_address_ipv6_flag() {
 }
 #[test]
 fn metrics_port_flag() {
-    let port1 = unused_tcp_port().expect("Unable to find unused port.");
-    let port2 = unused_tcp_port().expect("Unable to find unused port.");
+    let port1 = unused_tcp4_port().expect("Unable to find unused port.");
+    let port2 = unused_tcp4_port().expect("Unable to find unused port.");
     CommandLineTest::new()
         .flag("metrics", None)
         .flag("metrics-port", Some(port1.to_string().as_str()))
@@ -1610,6 +1923,10 @@ fn enable_proposer_re_orgs_default() {
                 config.chain.re_org_max_epochs_since_finalization,
                 DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
             );
+            assert_eq!(
+                config.chain.re_org_cutoff(12),
+                Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR
+            );
         });
 }

@@ -1642,6 +1959,49 @@ fn proposer_re_org_max_epochs_since_finalization() {
         });
 }

+#[test]
+fn proposer_re_org_cutoff() {
+    CommandLineTest::new()
+        .flag("proposer-reorg-cutoff", Some("500"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500))
+        });
+}
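The `re_org_cutoff` assertions above encode a small derivation: by default the cutoff is the slot duration divided by `DEFAULT_RE_ORG_CUTOFF_DENOMINATOR`, while `--proposer-reorg-cutoff` overrides it with an explicit millisecond value. A sketch of that arithmetic, under the assumption that the default denominator is 12 (the real constant lives in `chain_config`):

    // re_org_cutoff: explicit override in ms, or slot_duration / denominator.
    use std::time::Duration;

    fn re_org_cutoff(seconds_per_slot: u64, denominator: u32, override_ms: Option<u64>) -> Duration {
        override_ms
            .map(Duration::from_millis)
            .unwrap_or_else(|| Duration::from_secs(seconds_per_slot) / denominator)
    }

    // With 12s slots and an assumed denominator of 12 the default is 1s;
    // an override of 500 yields 500ms, matching the tests above.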
+
+#[test]
+fn proposer_re_org_disallowed_offsets_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.re_org_disallowed_offsets,
+                DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![0]).unwrap()
+            )
+        });
+}
+
+#[test]
+fn proposer_re_org_disallowed_offsets_override() {
+    CommandLineTest::new()
+        .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.re_org_disallowed_offsets,
+                DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![1, 2, 3]).unwrap()
+            )
+        });
+}
+
+#[test]
+#[should_panic]
+fn proposer_re_org_disallowed_offsets_invalid() {
+    CommandLineTest::new()
+        .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34"))
+        .run_with_zero_port();
+}
+
 #[test]
 fn monitoring_endpoint() {
     CommandLineTest::new()
diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs
index 8c000bbb3d4..4dd5ad95dd4 100644
--- a/lighthouse/tests/boot_node.rs
+++ b/lighthouse/tests/boot_node.rs
@@ -12,7 +12,7 @@
 use std::path::{Path, PathBuf};
 use std::process::Command;
 use std::str::FromStr;
 use tempfile::TempDir;
-use unused_port::unused_udp_port;
+use unused_port::unused_udp4_port;

 const IP_ADDRESS: &str = "192.168.2.108";

@@ -62,7 +62,7 @@ fn enr_address_arg() {
 #[test]
 fn port_flag() {
-    let port = unused_udp_port().unwrap();
+    let port = unused_udp4_port().unwrap();
     CommandLineTest::new()
         .flag("port", Some(port.to_string().as_str()))
         .run_with_ip()
@@ -122,7 +122,7 @@ fn boot_nodes_flag() {
 #[test]
 fn enr_port_flag() {
-    let port = unused_udp_port().unwrap();
+    let port = unused_udp4_port().unwrap();
     CommandLineTest::new()
         .flag("enr-port", Some(port.to_string().as_str()))
         .run_with_ip()
diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md
index c9fb3876813..c4050ac934e 100644
--- a/scripts/local_testnet/README.md
+++ b/scripts/local_testnet/README.md
@@ -20,7 +20,7 @@ Modify `vars.env` as desired.

 Start a local eth1 ganache server plus boot node along with `BN_COUNT` number of beacon nodes and `VC_COUNT` validator clients.

-The `start_local_testnet.sh` script takes three options `-v VC_COUNT`, `-d DEBUG_LEVEL` and `-h` for help.
+The `start_local_testnet.sh` script takes four options: `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals, and `-h` for help.
 The options may be given in any order or omitted, in which case they take the default values specified below.
 - VC_COUNT: the number of validator clients to create, default: `BN_COUNT`
 - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info`
diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh
index dcc0a5382a9..e3aba5c3add 100755
--- a/scripts/local_testnet/start_local_testnet.sh
+++ b/scripts/local_testnet/start_local_testnet.sh
@@ -28,7 +28,7 @@ while getopts "v:d:ph" flag; do
       echo "Options:"
       echo " -v: VC_COUNT    default: $VC_COUNT"
       echo " -d: DEBUG_LEVEL default: info"
-      echo " -p: enable private tx proposals"
+      echo " -p: enable builder proposals"
       echo " -h: this help"
       exit
       ;;
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml
index c5ce8793ad4..7f2ac456b57 100644
--- a/slasher/Cargo.toml
+++ b/slasher/Cargo.toml
@@ -12,8 +12,8 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"]

 [dependencies]
 bincode = "1.3.1"
 byteorder = "1.3.4"
-eth2_ssz = "0.4.1"
-eth2_ssz_derive = "0.3.1"
+ethereum_ssz = "0.5.0"
+ethereum_ssz_derive = "0.5.0"
 flate2 = { version = "1.0.14", features = ["zlib"], default-features = false }
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../common/lighthouse_metrics" }
@@ -26,8 +26,8 @@
 serde = "1.0"
 serde_derive = "1.0"
 slog = "2.5.2"
 sloggers = { version = "2.1.1", features = ["json"] }
-tree_hash = "0.4.1"
-tree_hash_derive = "0.4.0"
+tree_hash = "0.5.0"
+tree_hash_derive = "0.5.0"
 types = { path = "../consensus/types" }
 strum = { version = "0.24.1", features = ["derive"] }
diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar
index bae18073297..ddc49e13cd7 100644
--- a/testing/antithesis/Dockerfile.libvoidstar
+++ b/testing/antithesis/Dockerfile.libvoidstar
@@ -1,4 +1,4 @@
-FROM rust:1.66.1-bullseye AS builder
+FROM rust:1.68.2-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
 COPY . lighthouse
diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml
index 79664a26228..11283052f07 100644
--- a/testing/ef_tests/Cargo.toml
+++ b/testing/ef_tests/Cargo.toml
@@ -22,10 +22,10 @@
 serde = "1.0.116"
 serde_derive = "1.0.116"
 serde_repr = "0.1.6"
 serde_yaml = "0.8.13"
-eth2_ssz = "0.4.1"
-eth2_ssz_derive = "0.3.1"
-tree_hash = "0.4.1"
-tree_hash_derive = "0.4.0"
+ethereum_ssz = "0.5.0"
+ethereum_ssz_derive = "0.5.0"
+tree_hash = "0.5.0"
+tree_hash_derive = "0.5.0"
 cached_tree_hash = { path = "../../consensus/cached_tree_hash" }
 state_processing = { path = "../../consensus/state_processing" }
 swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" }
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile
index fc3dea6e2f5..f7562f477a2 100644
--- a/testing/ef_tests/Makefile
+++ b/testing/ef_tests/Makefile
@@ -1,4 +1,4 @@
-TESTS_TAG := v1.3.0-rc.3
+TESTS_TAG := v1.3.0-rc.4
 TESTS = general minimal mainnet
 TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index 31165d6329c..4f5d9983012 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -45,7 +45,6 @@ pub struct Checks {
     justified_checkpoint: Option<Checkpoint>,
     justified_checkpoint_root: Option<Hash256>,
     finalized_checkpoint: Option<Checkpoint>,
-    best_justified_checkpoint: Option<Checkpoint>,
     u_justified_checkpoint: Option<Checkpoint>,
     u_finalized_checkpoint: Option<Checkpoint>,
     proposer_boost_root: Option<Hash256>,
@@ -229,7 +228,6 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
             justified_checkpoint,
             justified_checkpoint_root,
             finalized_checkpoint,
-            best_justified_checkpoint,
             u_justified_checkpoint,
             u_finalized_checkpoint,
             proposer_boost_root,
@@ -260,11 +258,6 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
                         tester.check_finalized_checkpoint(*expected_finalized_checkpoint)?;
                     }

-                    if let Some(expected_best_justified_checkpoint) = best_justified_checkpoint {
-                        tester
-                            .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?;
-                    }
-
                     if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint {
                         tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?;
                     }
@@ -378,7 +371,7 @@ impl<E: EthSpec> Tester<E> {
            .chain
            .canonical_head
            .fork_choice_write_lock()
-            .update_time(slot, &self.spec)
+            .update_time(slot)
            .unwrap();
     }

@@ -388,7 +381,7 @@ impl<E: EthSpec> Tester<E> {
         let result = self.block_on_dangerous(self.harness.chain.process_block(
             block_root,
             block.clone(),
-            CountUnrealized::False,
+            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         ))?;
         if result.is_ok() != valid {
@@ -432,7 +425,7 @@ impl<E: EthSpec> Tester<E> {
            .harness
            .chain
            .slot_clock
-            .seconds_from_current_slot_start(self.spec.seconds_per_slot)
+            .seconds_from_current_slot_start()
            .unwrap();

         let result = self
@@ -448,7 +441,7 @@ impl<E: EthSpec> Tester<E> {
             &state,
             PayloadVerificationStatus::Irrelevant,
             &self.harness.chain.spec,
-            self.harness.chain.config.count_unrealized.into(),
+            CountUnrealized::True,
         );

         if result.is_ok() {
@@ -576,23 +569,6 @@ impl<E: EthSpec> Tester<E> {
         check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint)
     }

-    pub fn check_best_justified_checkpoint(
-        &self,
-        expected_checkpoint: Checkpoint,
-    ) -> Result<(), Error> {
-        let best_justified_checkpoint = self
-            .harness
-            .chain
-            .canonical_head
-            .fork_choice_read_lock()
-            .best_justified_checkpoint();
-        check_equal(
-            "best_justified_checkpoint",
-            best_justified_checkpoint,
-            expected_checkpoint,
-        )
-    }
-
     pub fn check_u_justified_checkpoint(
         &self,
         expected_checkpoint: Checkpoint,
diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs
index abf18b3506d..2ed596e25e4 100644
--- a/testing/ef_tests/src/handler.rs
+++ b/testing/ef_tests/src/handler.rs
@@ -547,6 +547,11 @@ impl<E: EthSpec> Handler for ForkChoiceHandler<E> {
             return false;
         }

+        // Tests are no longer generated for the base/phase0 specification.
+        if fork_name == ForkName::Base {
+            return false;
+        }
+
         // These tests check block validity (which may include signatures) and there is no need to
         // run them with fake crypto.
         cfg!(not(feature = "fake_crypto"))
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index 8a7209b89b1..33f8d67ec00 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -503,6 +503,18 @@ fn fork_choice_ex_ante() {
     ForkChoiceHandler::<MinimalEthSpec>::new("ex_ante").run();
 }

+#[test]
+fn fork_choice_reorg() {
+    ForkChoiceHandler::<MinimalEthSpec>::new("reorg").run();
+    // There is no mainnet variant for this test.
+}
+
+#[test]
+fn fork_choice_withholding() {
+    ForkChoiceHandler::<MinimalEthSpec>::new("withholding").run();
+    // There is no mainnet variant for this test.
+}
+
 #[test]
 fn optimistic_sync() {
     OptimisticSyncHandler::<MainnetEthSpec>::default().run();
diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs
index d8df3fd8aeb..898a089ba01 100644
--- a/testing/eth1_test_rig/src/ganache.rs
+++ b/testing/eth1_test_rig/src/ganache.rs
@@ -3,7 +3,7 @@
 use std::io::prelude::*;
 use std::io::BufReader;
 use std::process::{Child, Command, Stdio};
 use std::time::{Duration, Instant};
-use unused_port::unused_tcp_port;
+use unused_port::unused_tcp4_port;
 use web3::{transports::Http, Transport, Web3};

 /// How long we will wait for ganache to indicate that it is ready.
@@ -65,7 +65,7 @@ impl GanacheInstance {
     /// Start a new `ganache` process, waiting until it indicates that it is ready to accept
     /// RPC connections.
     pub fn new(chain_id: u64) -> Result<Self, String> {
-        let port = unused_tcp_port()?;
+        let port = unused_tcp4_port()?;
         let binary = match cfg!(windows) {
             true => "ganache.cmd",
             false => "ganache",
@@ -97,7 +97,7 @@
     }

     pub fn fork(&self) -> Result<Self, String> {
-        let port = unused_tcp_port()?;
+        let port = unused_tcp4_port()?;
         let binary = match cfg!(windows) {
             true => "ganache.cmd",
             false => "ganache",
diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs
index ad5af531586..61a50b0405e 100644
--- a/testing/execution_engine_integration/src/execution_engine.rs
+++ b/testing/execution_engine_integration/src/execution_engine.rs
@@ -4,7 +4,7 @@
 use sensitive_url::SensitiveUrl;
 use std::path::PathBuf;
 use std::process::Child;
 use tempfile::TempDir;
-use unused_port::unused_tcp_port;
+use unused_port::unused_tcp4_port;

 pub const KEYSTORE_PASSWORD: &str = "testpwd";
 pub const ACCOUNT1: &str = "7b8C3a386C0eea54693fFB0DA17373ffC9228139";
@@ -50,8 +50,8 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> {
     pub fn new(engine: E) -> Self {
         let datadir = E::init_datadir();
         let jwt_secret_path = datadir.path().join(DEFAULT_JWT_FILE);
-        let http_port = unused_tcp_port().unwrap();
-        let http_auth_port = unused_tcp_port().unwrap();
+        let http_port = unused_tcp4_port().unwrap();
+        let http_auth_port = unused_tcp4_port().unwrap();
         let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path);
         let provider = Provider::<Http>::try_from(format!("http://localhost:{}", http_port))
             .expect("failed to instantiate ethers provider");
diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs
index 1b96fa9f3f9..5c83a97e21f 100644
--- a/testing/execution_engine_integration/src/geth.rs
+++ b/testing/execution_engine_integration/src/geth.rs
@@ -5,7 +5,7 @@
 use std::path::{Path, PathBuf};
 use std::process::{Child, Command, Output};
 use std::{env, fs::File};
 use tempfile::TempDir;
-use unused_port::unused_tcp_port;
+use unused_port::unused_tcp4_port;

 const GETH_BRANCH: &str = "master";
 const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum";
@@ -83,7 +83,7 @@ impl GenericExecutionEngine for GethEngine {
         http_auth_port: u16,
         jwt_secret_path: PathBuf,
     ) -> Child {
-        let network_port = unused_tcp_port().unwrap();
+        let network_port = unused_tcp4_port().unwrap();

         Command::new(Self::binary_path())
             .arg("--datadir")
diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs
index 740d87ab8ae..485485c6fe3 100644
--- a/testing/execution_engine_integration/src/nethermind.rs
+++ b/testing/execution_engine_integration/src/nethermind.rs
@@ -6,12 +6,12 @@
 use std::fs::File;
 use std::path::{Path, PathBuf};
 use std::process::{Child, Command, Output};
 use tempfile::TempDir;
-use unused_port::unused_tcp_port;
+use unused_port::unused_tcp4_port;

 /// We've pinned the Nethermind version since our method of using the `master` branch to
 /// find the latest tag isn't working. It appears Nethermind doesn't always tag on `master`.
 /// We should fix this so we always pull the latest version of Nethermind.
-const NETHERMIND_BRANCH: &str = "release/1.14.6";
+const NETHERMIND_BRANCH: &str = "release/1.17.1";
 const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind";

 fn build_result(repo_dir: &Path) -> Output {
@@ -67,7 +67,7 @@ impl NethermindEngine {
             .join("Nethermind.Runner")
             .join("bin")
             .join("Release")
-            .join("net6.0")
+            .join("net7.0")
             .join("Nethermind.Runner")
     }
 }

@@ -88,14 +88,14 @@ impl GenericExecutionEngine for NethermindEngine {
         http_auth_port: u16,
         jwt_secret_path: PathBuf,
     ) -> Child {
-        let network_port = unused_tcp_port().unwrap();
+        let network_port = unused_tcp4_port().unwrap();
         let genesis_json_path = datadir.path().join("genesis.json");

         Command::new(Self::binary_path())
             .arg("--datadir")
             .arg(datadir.path().to_str().unwrap())
             .arg("--config")
-            .arg("kiln")
+            .arg("hive")
             .arg("--Init.ChainSpecPath")
             .arg(genesis_json_path.to_str().unwrap())
             .arg("--Merge.TerminalTotalDifficulty")
diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs
index 15e9f26018f..726019a8480 100644
--- a/testing/execution_engine_integration/src/test_rig.rs
+++ b/testing/execution_engine_integration/src/test_rig.rs
@@ -15,8 +15,8 @@
 use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
 use task_executor::TaskExecutor;
 use tokio::time::sleep;
 use types::{
-    Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkName, FullPayload,
-    Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256,
+    Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
+    ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256,
 };

 const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30);
@@ -626,14 +626,35 @@ async fn check_payload_reconstruction<E: GenericExecutionEngine>(
     ee: &ExecutionPair<E, MainnetEthSpec>,
     payload: &ExecutionPayload<MainnetEthSpec>,
 ) {
+    // check via legacy eth_getBlockByHash
     let reconstructed = ee
         .execution_layer
-        // FIXME: handle other forks here?
-        .get_payload_by_block_hash(payload.block_hash(), ForkName::Merge)
+        .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name())
         .await
         .unwrap()
         .unwrap();
     assert_eq!(reconstructed, *payload);
+    // also check via payload bodies method
+    let capabilities = ee
+        .execution_layer
+        .get_engine_capabilities(None)
+        .await
+        .unwrap();
+    assert!(
+        // if the engine doesn't have these capabilities, we need to update the client in our tests
+        capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1,
+        "Testing engine does not support payload bodies methods"
+    );
+    let mut bodies = ee
+        .execution_layer
+        .get_payload_bodies_by_hash(vec![payload.block_hash()])
+        .await
+        .unwrap();
+    assert_eq!(bodies.len(), 1);
+    let body = bodies.pop().unwrap().unwrap();
+    let header = ExecutionPayloadHeader::from(payload.to_ref());
+    let reconstructed_from_body = body.to_payload(header).unwrap();
+    assert_eq!(reconstructed_from_body, *payload);
 }

 /// Returns the duration since the unix epoch.
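The new assertions above exercise payload reconstruction along two paths: the legacy `eth_getBlockByHash` route, and the `engine_getPayloadBodiesByHash` route in which a full payload is rebuilt by combining the header the beacon node already holds with the body fetched from the engine. A toy model of that split-and-recombine round trip (illustrative types only, not the real Lighthouse ones):

    // Toy model: a payload splits into a header and a body, and
    // recombining them must reproduce the original exactly.
    #[derive(Clone, PartialEq, Debug)]
    struct Header {
        block_hash: [u8; 32],
        gas_used: u64,
    }

    #[derive(Clone, PartialEq, Debug)]
    struct Body {
        transactions: Vec<Vec<u8>>,
    }

    #[derive(Clone, PartialEq, Debug)]
    struct Payload {
        header: Header,
        body: Body,
    }

    fn split(p: &Payload) -> (Header, Body) {
        (p.header.clone(), p.body.clone())
    }

    fn main() {
        let payload = Payload {
            header: Header { block_hash: [0; 32], gas_used: 21_000 },
            body: Body { transactions: vec![vec![0xde, 0xad]] },
        };
        let (header, body) = split(&payload);
        // Mirrors `body.to_payload(header)` in the test above.
        let reconstructed = Payload { header, body };
        assert_eq!(reconstructed, payload);
    }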
diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs
index 82a60cda2f2..d4fd115bec3 100644
--- a/testing/node_test_rig/src/lib.rs
+++ b/testing/node_test_rig/src/lib.rs
@@ -89,8 +89,9 @@ pub fn testing_client_config() -> ClientConfig {
     let mut client_config = ClientConfig::default();

     // Setting ports to `0` means that the OS will choose some available port.
-    client_config.network.libp2p_port = 0;
-    client_config.network.discovery_port = 0;
+    client_config
+        .network
+        .set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 0, 0);
     client_config.network.upnp_enabled = false;
     client_config.http_api.enabled = true;
     client_config.http_api.listen_port = 0;
diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs
index f1196502fbd..9668ee8cb44 100644
--- a/testing/simulator/src/cli.rs
+++ b/testing/simulator/src/cli.rs
@@ -24,6 +24,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true)
                 .default_value("4")
                 .help("Number of beacon nodes"))
+            .arg(Arg::with_name("proposer-nodes")
+                .short("n")
+                .long("nodes")
+                .takes_value(true)
+                .default_value("2")
+                .help("Number of proposer-only beacon nodes"))
             .arg(Arg::with_name("validators_per_node")
                 .short("v")
                 .long("validators_per_node")
@@ -57,6 +63,12 @@
                 .takes_value(true)
                 .default_value("4")
                 .help("Number of beacon nodes"))
+            .arg(Arg::with_name("proposer-nodes")
+                .short("n")
+                .long("nodes")
+                .takes_value(true)
+                .default_value("2")
+                .help("Number of proposer-only beacon nodes"))
             .arg(Arg::with_name("validators_per_node")
                 .short("v")
                 .long("validators_per_node")
diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs
index a7dcd30f040..43861b0cf95 100644
--- a/testing/simulator/src/eth1_sim.rs
+++ b/testing/simulator/src/eth1_sim.rs
@@ -13,7 +13,7 @@ use node_test_rig::{
 use rayon::prelude::*;
 use sensitive_url::SensitiveUrl;
 use std::cmp::max;
-use std::net::{IpAddr, Ipv4Addr};
+use std::net::Ipv4Addr;
 use std::time::Duration;
 use tokio::time::sleep;
 use types::{Epoch, EthSpec, MinimalEthSpec};
@@ -27,6 +27,8 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] =
 pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default");
+    let proposer_nodes = value_t!(matches, "proposer-nodes", usize).unwrap_or(0);
+    println!("PROPOSER-NODES: {}", proposer_nodes);
     let validators_per_node = value_t!(matches, "validators_per_node", usize)
         .expect("missing validators_per_node default");
     let speed_up_factor =
@@ -35,7 +37,8 @@
     let post_merge_sim = matches.is_present("post-merge");

     println!("Beacon Chain Simulator:");
-    println!(" nodes:{}", node_count);
+    println!(" nodes:{}, proposer_nodes: {}", node_count, proposer_nodes);
+    println!(" validators_per_node:{}", validators_per_node);
     println!(" post merge simulation:{}", post_merge_sim);
     println!(" continue_after_checks:{}", continue_after_checks);
@@ -148,9 +151,9 @@
             beacon_config.sync_eth1_chain = true;
             beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
             beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
-            beacon_config.network.target_peers = node_count - 1;
+            beacon_config.network.target_peers = node_count + proposer_nodes - 1;

-            beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+            beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);

             if post_merge_sim {
                 let el_config = execution_layer::Config {
@@ -174,7 +177,17 @@
             /*
              * One by one, add beacon nodes to the network.
              */
             for _ in 0..node_count - 1 {
-                network.add_beacon_node(beacon_config.clone()).await?;
+                network
+                    .add_beacon_node(beacon_config.clone(), false)
+                    .await?;
+            }
+
+            /*
+             * One by one, add proposer nodes to the network.
+             */
+            for _ in 0..proposer_nodes - 1 {
+                println!("Adding a proposer node");
+                network.add_beacon_node(beacon_config.clone(), true).await?;
             }

             /*
@@ -311,7 +324,7 @@
     */
    println!(
        "Simulation complete. Finished with {} beacon nodes and {} validator clients",
-        network.beacon_node_count(),
+        network.beacon_node_count() + network.proposer_node_count(),
        network.validator_client_count()
    );
diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs
index 8df912ed161..e35870d126c 100644
--- a/testing/simulator/src/local_network.rs
+++ b/testing/simulator/src/local_network.rs
@@ -25,6 +25,7 @@ pub const TERMINAL_BLOCK: u64 = 64;

 pub struct Inner<E: EthSpec> {
     pub context: RuntimeContext<E>,
     pub beacon_nodes: RwLock<Vec<LocalBeaconNode<E>>>,
+    pub proposer_nodes: RwLock<Vec<LocalBeaconNode<E>>>,
     pub validator_clients: RwLock<Vec<LocalValidatorClient<E>>>,
     pub execution_nodes: RwLock<Vec<LocalExecutionNode<E>>>,
 }
@@ -58,10 +59,13 @@ impl<E: EthSpec> LocalNetwork<E> {
         context: RuntimeContext<E>,
         mut beacon_config: ClientConfig,
     ) -> Result<Self, String> {
-        beacon_config.network.discovery_port = BOOTNODE_PORT;
-        beacon_config.network.libp2p_port = BOOTNODE_PORT;
-        beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT);
-        beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT);
+        beacon_config.network.set_ipv4_listening_address(
+            std::net::Ipv4Addr::UNSPECIFIED,
+            BOOTNODE_PORT,
+            BOOTNODE_PORT,
+        );
+        beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT);
+        beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT);
         beacon_config.network.discv5_config.table_filter = |_| true;

         let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer {
@@ -94,6 +98,7 @@
             inner: Arc::new(Inner {
                 context,
                 beacon_nodes: RwLock::new(vec![beacon_node]),
+                proposer_nodes: RwLock::new(vec![]),
                 execution_nodes: RwLock::new(execution_node),
                 validator_clients: RwLock::new(vec![]),
             }),
@@ -108,6 +113,14 @@
         self.beacon_nodes.read().len()
     }

+    /// Returns the number of proposer nodes in the network.
+    ///
+    /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected
+    /// (e.g., another Lighthouse process on the same machine.)
+    pub fn proposer_node_count(&self) -> usize {
+        self.proposer_nodes.read().len()
+    }
+
     /// Returns the number of validator clients in the network.
     ///
     /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected
     /// (e.g., another Lighthouse process on the same machine.)
     pub fn validator_client_count(&self) -> usize {
         self.validator_clients.read().len()
     }

     /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR.
-    pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> {
+    pub async fn add_beacon_node(
+        &self,
+        mut beacon_config: ClientConfig,
+        is_proposer: bool,
+    ) -> Result<(), String> {
         let self_1 = self.clone();
         let count = self.beacon_node_count() as u16;
         println!("Adding beacon node..");
@@ -132,11 +149,16 @@
                     .enr()
                     .expect("bootnode must have a network"),
             );
-            beacon_config.network.discovery_port = BOOTNODE_PORT + count;
-            beacon_config.network.libp2p_port = BOOTNODE_PORT + count;
-            beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count);
-            beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count);
+            let count = (self.beacon_node_count() + self.proposer_node_count()) as u16;
+            beacon_config.network.set_ipv4_listening_address(
+                std::net::Ipv4Addr::UNSPECIFIED,
+                BOOTNODE_PORT + count,
+                BOOTNODE_PORT + count,
+            );
+            beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count);
+            beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count);
             beacon_config.network.discv5_config.table_filter = |_| true;
+            beacon_config.network.proposer_only = is_proposer;
         }
         if let Some(el_config) = &mut beacon_config.execution_layer {
             let config = MockExecutionConfig {
@@ -167,7 +189,11 @@
             beacon_config,
         )
         .await?;
-        self_1.beacon_nodes.write().push(beacon_node);
+        if is_proposer {
+            self_1.proposer_nodes.write().push(beacon_node);
+        } else {
+            self_1.beacon_nodes.write().push(beacon_node);
+        }
         Ok(())
     }

@@ -194,6 +220,16 @@
                 .http_api_listen_addr()
                 .expect("Must have http started")
         };
+        // If there is a proposer node for the same index, we will use that for proposing
+        let proposer_socket_addr = {
+            let read_lock = self.proposer_nodes.read();
+            read_lock.get(beacon_node).map(|proposer_node| {
+                proposer_node
+                    .client
+                    .http_api_listen_addr()
+                    .expect("Must have http started")
+            })
+        };

         let beacon_node = SensitiveUrl::parse(
             format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(),
@@ -204,6 +240,21 @@
         } else {
             vec![beacon_node]
         };
+
+        // If we have a proposer node established, use it.
+        if let Some(proposer_socket_addr) = proposer_socket_addr {
+            let url = SensitiveUrl::parse(
+                format!(
+                    "http://{}:{}",
+                    proposer_socket_addr.ip(),
+                    proposer_socket_addr.port()
+                )
+                .as_str(),
+            )
+            .unwrap();
+            validator_config.proposer_nodes = vec![url];
+        }
+
         let validator_client = LocalValidatorClient::production_with_insecure_keypairs(
             context,
             validator_config,
@@ -217,9 +268,11 @@
     /// For all beacon nodes in `Self`, return an HTTP client to access each node's HTTP API.
     pub fn remote_nodes(&self) -> Result<Vec<BeaconNodeHttpClient>, String> {
         let beacon_nodes = self.beacon_nodes.read();
+        let proposer_nodes = self.proposer_nodes.read();

         beacon_nodes
             .iter()
+            .chain(proposer_nodes.iter())
             .map(|beacon_node| beacon_node.remote_node())
             .collect()
     }
diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs
index 8cf0ccd793a..fc18b1cd489 100644
--- a/testing/simulator/src/no_eth1_sim.rs
+++ b/testing/simulator/src/no_eth1_sim.rs
@@ -7,7 +7,7 @@ use node_test_rig::{
 };
 use rayon::prelude::*;
 use std::cmp::max;
-use std::net::{IpAddr, Ipv4Addr};
+use std::net::Ipv4Addr;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use tokio::time::sleep;
 use types::{Epoch, EthSpec, MainnetEthSpec};
@@ -92,7 +92,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     beacon_config.dummy_eth1_backend = true;
     beacon_config.sync_eth1_chain = true;

-    beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+    beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);

     let main_future = async {
         let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?;
@@ -101,7 +101,9 @@
          */

         for _ in 0..node_count - 1 {
-            network.add_beacon_node(beacon_config.clone()).await?;
+            network
+                .add_beacon_node(beacon_config.clone(), false)
+                .await?;
         }

         /*
@@ -152,7 +154,7 @@
      */
     println!(
         "Simulation complete. Finished with {} beacon nodes and {} validator clients",
-        network.beacon_node_count(),
+        network.beacon_node_count() + network.proposer_node_count(),
         network.validator_client_count()
     );
diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs
index 1d15e531db7..78f7e1ee9fb 100644
--- a/testing/simulator/src/sync_sim.rs
+++ b/testing/simulator/src/sync_sim.rs
@@ -8,7 +8,7 @@ use node_test_rig::{
 };
 use node_test_rig::{testing_validator_config, ClientConfig};
 use std::cmp::max;
-use std::net::{IpAddr, Ipv4Addr};
+use std::net::Ipv4Addr;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use types::{Epoch, EthSpec};
@@ -96,7 +96,7 @@ fn syncing_sim(

     beacon_config.http_api.allow_sync_stalled = true;

-    beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+    beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);

     // Generate the directories and keystores required for the validator clients.
     let validator_indices = (0..num_validators).collect::<Vec<_>>();
@@ -229,7 +229,7 @@ pub async fn verify_one_node_sync(
     )
     .await;
     // Add a beacon node
-    network.add_beacon_node(beacon_config).await?;
+    network.add_beacon_node(beacon_config, false).await?;
     // Check every `epoch_duration` if nodes are synced
     // limited to at most `sync_timeout` epochs
     let mut interval = tokio::time::interval(epoch_duration);
@@ -266,8 +266,10 @@ pub async fn verify_two_nodes_sync(
     )
     .await;
     // Add beacon nodes
-    network.add_beacon_node(beacon_config.clone()).await?;
-    network.add_beacon_node(beacon_config).await?;
+    network
+        .add_beacon_node(beacon_config.clone(), false)
+        .await?;
+    network.add_beacon_node(beacon_config, false).await?;
     // Check every `epoch_duration` if nodes are synced
     // limited to at most `sync_timeout` epochs
     let mut interval = tokio::time::interval(epoch_duration);
@@ -306,8 +308,10 @@ pub async fn verify_in_between_sync(
     )
     .await;
     // Add two beacon nodes
-    network.add_beacon_node(beacon_config.clone()).await?;
-    network.add_beacon_node(beacon_config).await?;
+    network
+        .add_beacon_node(beacon_config.clone(), false)
+        .await?;
+    network.add_beacon_node(beacon_config, false).await?;
     // Delay before adding additional syncing nodes.
     epoch_delay(
         Epoch::new(sync_timeout - 5),
@@ -316,7 +320,7 @@
     )
     .await;
     // Add a beacon node
-    network.add_beacon_node(config1.clone()).await?;
+    network.add_beacon_node(config1.clone(), false).await?;
     // Check every `epoch_duration` if nodes are synced
     // limited to at most `sync_timeout` epochs
     let mut interval = tokio::time::interval(epoch_duration);
diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml
index 6da9f2f4a6f..a25b3c31c1a 100644
--- a/testing/state_transition_vectors/Cargo.toml
+++ b/testing/state_transition_vectors/Cargo.toml
@@ -9,7 +9,7 @@ edition = "2021"
 [dependencies]
 state_processing = { path = "../../consensus/state_processing" }
 types = { path = "../../consensus/types" }
-eth2_ssz = "0.4.1"
+ethereum_ssz = "0.5.0"
 beacon_chain = { path = "../../beacon_node/beacon_chain" }
 lazy_static = "1.4.0"
 tokio = { version = "1.14.0", features = ["rt-multi-thread"] }
diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml
index 8ce58300629..c0fbf667236 100644
--- a/testing/web3signer_tests/Cargo.toml
+++ b/testing/web3signer_tests/Cargo.toml
@@ -3,8 +3,6 @@ name = "web3signer_tests"
 version = "0.1.0"
 edition = "2021"

-build = "build.rs"
-
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
@@ -27,9 +25,7 @@
 serde = "1.0.116"
 serde_derive = "1.0.116"
 serde_yaml = "0.8.13"
 eth2_network_config = { path = "../../common/eth2_network_config" }
-
-[build-dependencies]
-tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] }
-reqwest = { version = "0.11.0", features = ["json","stream"] }
 serde_json = "1.0.58"
 zip = "0.5.13"
+lazy_static = "1.4.0"
+parking_lot = "0.12.0"
\ No newline at end of file
diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/src/get_web3signer.rs
similarity index 88%
rename from testing/web3signer_tests/build.rs
rename to testing/web3signer_tests/src/get_web3signer.rs
index a55c39376a4..800feb204ae 100644
--- a/testing/web3signer_tests/build.rs
+++ b/testing/web3signer_tests/src/get_web3signer.rs
@@ -15,17 +15,6 @@
 use zip::ZipArchive;

 /// Use `Some("21.8.1")` to download a specific version.
 const FIXED_VERSION_STRING: Option<&str> = None;

-#[tokio::main]
-async fn main() {
-    let out_dir = env::var("OUT_DIR").unwrap();
-
-    // Read a Github API token from the environment. This is intended to prevent rate-limits on CI.
-    // We use a name that is unlikely to accidentally collide with anything the user has configured.
-    let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN");
-
-    download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await;
-}
-
 pub async fn download_binary(dest_dir: PathBuf, github_token: &str) {
     let version_file = dest_dir.join("version");
diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs
index 16bffd04f96..dd17ae23b15 100644
--- a/testing/web3signer_tests/src/lib.rs
+++ b/testing/web3signer_tests/src/lib.rs
@@ -9,16 +9,21 @@
 //! - Lighthouse can issue valid requests to Web3Signer.
 //! - The signatures generated by Web3Signer are identical to those which Lighthouse generates.
 //!
-//! There is a build script in this crate which obtains the latest version of Web3Signer and makes
-//! it available via the `OUT_DIR`.
+//! There is a `download_binary` function in the `get_web3signer` module which obtains the latest
+//! version of Web3Signer and makes it available via the `TEMP_DIR`.
+#![cfg(all(test, unix, not(debug_assertions)))]
+
+mod get_web3signer;

-#[cfg(all(test, unix, not(debug_assertions)))]
 mod tests {
+    use crate::get_web3signer::download_binary;
     use account_utils::validator_definitions::{
         SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition,
     };
     use eth2_keystore::KeystoreBuilder;
     use eth2_network_config::Eth2NetworkConfig;
+    use lazy_static::lazy_static;
+    use parking_lot::Mutex;
     use reqwest::Client;
     use serde::Serialize;
     use slot_clock::{SlotClock, TestingSlotClock};
@@ -31,7 +36,8 @@
     use std::sync::Arc;
     use std::time::{Duration, Instant};
     use task_executor::TaskExecutor;
-    use tempfile::TempDir;
+    use tempfile::{tempdir, TempDir};
+    use tokio::sync::OnceCell;
     use tokio::time::sleep;
     use types::*;
     use url::Url;
@@ -51,6 +57,13 @@
     /// debugging.
     const SUPPRESS_WEB3SIGNER_LOGS: bool = true;

+    lazy_static! {
+        static ref TEMP_DIR: Arc<Mutex<TempDir>> = Arc::new(Mutex::new(
+            tempdir().expect("Failed to create temporary directory")
+        ));
+        static ref GET_WEB3SIGNER_BIN: OnceCell<()> = OnceCell::new();
+    }
+
     type E = MainnetEthSpec;

     /// This marker trait is implemented for objects that we wish to compare to ensure Web3Signer
@@ -99,7 +112,10 @@
     /// The location of the Web3Signer binary generated by the build script.
     fn web3signer_binary() -> PathBuf {
-        PathBuf::from(env::var("OUT_DIR").unwrap())
+        TEMP_DIR
+            .lock()
+            .path()
+            .to_path_buf()
             .join("web3signer")
             .join("bin")
             .join("web3signer")
@@ -143,6 +159,19 @@ impl Web3SignerRig {
         pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self {
+            GET_WEB3SIGNER_BIN
+                .get_or_init(|| async {
+                    // Read a Github API token from the environment. This is intended to prevent rate-limits on CI.
+                    // We use a name that is unlikely to accidentally collide with anything the user has configured.
+ let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); + download_binary( + TEMP_DIR.lock().path().to_path_buf(), + github_token.as_deref().unwrap_or(""), + ) + .await; + }) + .await; + let keystore_dir = TempDir::new().unwrap(); let keypair = testing_keypair(); let keystore = diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index ce33cb99297..494ebcb3dfc 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -13,7 +13,7 @@ tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } logging = { path = "../common/logging" } [dependencies] -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap = "2.33.3" slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } @@ -47,7 +47,7 @@ lighthouse_version = { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" libsecp256k1 = "0.7.0" ring = "0.16.19" rand = { version = "0.8.5", features = ["small_rng"] } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 631e54dc4eb..278dc22d0de 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -18,7 +18,7 @@ r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" filesystem = { path = "../../common/filesystem" } arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 3793766b6aa..99d37c38b9b 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -9,7 +9,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, pub genesis_validators_root: Hash256, } @@ -27,7 +27,7 @@ pub struct InterchangeData { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, @@ -37,9 +37,9 @@ pub struct SignedBlock { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub target_epoch: Epoch, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 3b37492377f..61a5a094cd2 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -7,8 +7,11 @@ use crate::{ }; use crate::{http_metrics::metrics, 
validator_store::ValidatorStore};
 use environment::RuntimeContext;
+use eth2::BeaconNodeHttpClient;
 use slog::{crit, debug, error, info, trace, warn};
 use slot_clock::SlotClock;
+use std::fmt::Debug;
+use std::future::Future;
 use std::ops::Deref;
 use std::sync::Arc;
 use std::time::Duration;
@@ -45,6 +48,7 @@ pub struct BlockServiceBuilder<T, E: EthSpec> {
     validator_store: Option<Arc<ValidatorStore<T, E>>>,
     slot_clock: Option<Arc<T>>,
     beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
+    proposer_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
     context: Option<RuntimeContext<E>>,
     graffiti: Option<Graffiti>,
     graffiti_file: Option<GraffitiFile>,
@@ -57,6 +61,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
             validator_store: None,
             slot_clock: None,
             beacon_nodes: None,
+            proposer_nodes: None,
             context: None,
             graffiti: None,
             graffiti_file: None,
@@ -79,6 +84,11 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
         self
     }
 
+    pub fn proposer_nodes(mut self, proposer_nodes: Arc<BeaconNodeFallback<T, E>>) -> Self {
+        self.proposer_nodes = Some(proposer_nodes);
+        self
+    }
+
     pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
         self.context = Some(context);
         self
@@ -114,6 +124,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
             context: self
                 .context
                 .ok_or("Cannot build BlockService without runtime_context")?,
+            proposer_nodes: self.proposer_nodes,
             graffiti: self.graffiti,
             graffiti_file: self.graffiti_file,
             block_delay: self.block_delay,
@@ -122,11 +133,81 @@
         }
     }
 
+// Combines a set of non-block-proposing `beacon_nodes` and only-block-proposing
+// `proposer_nodes`.
+pub struct ProposerFallback<T, E: EthSpec> {
+    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
+    proposer_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
+}
+
+impl<T: SlotClock, E: EthSpec> ProposerFallback<T, E> {
+    // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`.
+    pub async fn first_success_try_proposers_first<'a, F, O, Err, R>(
+        &'a self,
+        require_synced: RequireSynced,
+        offline_on_failure: OfflineOnFailure,
+        func: F,
+    ) -> Result<O, Errors<Err>>
+    where
+        F: Fn(&'a BeaconNodeHttpClient) -> R + Clone,
+        R: Future<Output = Result<O, Err>>,
+        Err: Debug,
+    {
+        // If there are proposer nodes, try calling `func` on them and return early if they are successful.
+        if let Some(proposer_nodes) = &self.proposer_nodes {
+            if let Ok(result) = proposer_nodes
+                .first_success(require_synced, offline_on_failure, func.clone())
+                .await
+            {
+                return Ok(result);
+            }
+        }
+
+        // If the proposer nodes failed, try on the non-proposer nodes.
+        self.beacon_nodes
+            .first_success(require_synced, offline_on_failure, func)
+            .await
+    }
+
+    // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`.
+    pub async fn first_success_try_proposers_last<'a, F, O, Err, R>(
+        &'a self,
+        require_synced: RequireSynced,
+        offline_on_failure: OfflineOnFailure,
+        func: F,
+    ) -> Result<O, Errors<Err>>
+    where
+        F: Fn(&'a BeaconNodeHttpClient) -> R + Clone,
+        R: Future<Output = Result<O, Err>>,
+        Err: Debug,
+    {
+        // Try running `func` on the non-proposer beacon nodes.
+        let beacon_nodes_result = self
+            .beacon_nodes
+            .first_success(require_synced, offline_on_failure, func.clone())
+            .await;
+
+        match (beacon_nodes_result, &self.proposer_nodes) {
+            // The non-proposer node call succeeded, return the result.
+            (Ok(success), _) => Ok(success),
+            // The non-proposer node call failed, but we don't have any proposer nodes. Return an error.
+            (Err(e), None) => Err(e),
+            // The non-proposer node call failed, try the same call on the proposer nodes.
+            (Err(_), Some(proposer_nodes)) => {
+                proposer_nodes
+                    .first_success(require_synced, offline_on_failure, func)
+                    .await
+            }
+        }
+    }
+}
+
 /// Helper to minimise `Arc` usage.
pub struct Inner { validator_store: Arc>, slot_clock: Arc, beacon_nodes: Arc>, + proposer_nodes: Option>>, context: RuntimeContext, graffiti: Option, graffiti_file: Option, @@ -334,16 +415,23 @@ impl BlockService { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; info!( log, "Requesting unsigned block"; "slot" => slot.as_u64(), ); + // Request block from first responsive beacon node. - let block = self - .beacon_nodes - .first_success( + // + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. + let block = proposer_fallback + .first_success_try_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { @@ -424,8 +512,12 @@ impl BlockService { ); // Publish block with first available beacon node. - self.beacon_nodes - .first_success( + // + // Try the proposer nodes first, since we've likely gone to efforts to + // protect them from DoS attacks and they're most likely to successfully + // publish a block. + proposer_fallback + .first_success_try_proposers_first( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async { diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index fd96aa1f5c4..41ef85dfcd3 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -26,6 +26,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true), ) + .arg( + Arg::with_name("proposer-nodes") + .long("proposer-nodes") + .value_name("NETWORK_ADDRESSES") + .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ + These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." + ) + .takes_value(true), + ) .arg( Arg::with_name("disable-run-on-all") .long("disable-run-on-all") @@ -118,7 +127,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("CERTIFICATE-FILES") .takes_value(true) .help("Comma-separated paths to custom TLS certificates to use when connecting \ - to a beacon node. These certificates must be in PEM format and are used \ + to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. Commas must only be used as a \ delimiter, and must not be part of the certificate path.") ) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 724d6c74f1f..b6e808a86b5 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -29,6 +29,8 @@ pub struct Config { /// /// Should be similar to `["http://localhost:8080"]` pub beacon_nodes: Vec, + /// An optional beacon node used for block proposals only. + pub proposer_nodes: Vec, /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, @@ -95,6 +97,7 @@ impl Default for Config { validator_dir, secrets_dir, beacon_nodes, + proposer_nodes: Vec::new(), allow_unsynced_beacon_node: false, disable_auto_discover: false, init_slashing_protection: false, @@ -186,6 +189,14 @@ impl Config { .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?]; } + if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer_nodes")? 
{ + config.proposer_nodes = proposer_nodes + .split(',') + .map(SensitiveUrl::parse) + .collect::>() + .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; + } + if cli_args.is_present("delete-lockfiles") { warn!( log, diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index b42cd11edd5..e688792ddc1 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -60,7 +60,7 @@ impl ApiSecret { // Create and write the secret key to file with appropriate permissions create_with_600_perms( &sk_path, - eth2_serde_utils::hex::encode(sk.serialize()).as_bytes(), + serde_utils::hex::encode(sk.serialize()).as_bytes(), ) .map_err(|e| { format!( @@ -75,7 +75,7 @@ impl ApiSecret { format!( "{}{}", PK_PREFIX, - eth2_serde_utils::hex::encode(&pk.serialize_compressed()[..]) + serde_utils::hex::encode(&pk.serialize_compressed()[..]) ) .as_bytes(), ) @@ -90,7 +90,7 @@ impl ApiSecret { let sk = fs::read(&sk_path) .map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e)) .and_then(|bytes| { - eth2_serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) + serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) .map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME)) }) .and_then(|bytes| { @@ -114,7 +114,7 @@ impl ApiSecret { let hex = String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?; if let Some(stripped) = hex.strip_prefix(PK_PREFIX) { - eth2_serde_utils::hex::decode(stripped) + serde_utils::hex::decode(stripped) .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME)) } else { Err(format!("unable to parse {}", SK_FILENAME)) @@ -153,7 +153,7 @@ impl ApiSecret { /// Returns the public key of `self` as a 0x-prefixed hex string. fn pubkey_string(&self) -> String { - eth2_serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) + serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) } /// Returns the API token. 
@@ -205,7 +205,7 @@ impl ApiSecret {
             let message =
                 Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes");
             let (signature, _) = libsecp256k1::sign(&message, &sk);
-            eth2_serde_utils::hex::encode(signature.serialize_der().as_ref())
+            serde_utils::hex::encode(signature.serialize_der().as_ref())
         }
     }
 }
diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/src/http_api/create_signed_voluntary_exit.rs
new file mode 100644
index 00000000000..b777d158064
--- /dev/null
+++ b/validator_client/src/http_api/create_signed_voluntary_exit.rs
@@ -0,0 +1,69 @@
+use crate::validator_store::ValidatorStore;
+use bls::{PublicKey, PublicKeyBytes};
+use slog::{info, Logger};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit};
+
+pub async fn create_signed_voluntary_exit<T: 'static + SlotClock + Clone, E: EthSpec>(
+    pubkey: PublicKey,
+    maybe_epoch: Option<Epoch>,
+    validator_store: Arc<ValidatorStore<T, E>>,
+    slot_clock: T,
+    log: Logger,
+) -> Result<SignedVoluntaryExit, warp::Rejection> {
+    let epoch = match maybe_epoch {
+        Some(epoch) => epoch,
+        None => get_current_epoch::<T, E>(slot_clock).ok_or_else(|| {
+            warp_utils::reject::custom_server_error("Unable to determine current epoch".to_string())
+        })?,
+    };
+
+    let pubkey_bytes = PublicKeyBytes::from(pubkey);
+    if !validator_store.has_validator(&pubkey_bytes) {
+        return Err(warp_utils::reject::custom_not_found(format!(
+            "{} is disabled or not managed by this validator client",
+            pubkey_bytes.as_hex_string()
+        )));
+    }
+
+    let validator_index = validator_store
+        .validator_index(&pubkey_bytes)
+        .ok_or_else(|| {
+            warp_utils::reject::custom_not_found(format!(
+                "The validator index for {} is not known. The validator client \
+                may still be initializing or the validator has not yet had a \
+                deposit processed.",
+                pubkey_bytes.as_hex_string()
+            ))
+        })?;
+
+    let voluntary_exit = VoluntaryExit {
+        epoch,
+        validator_index,
+    };
+
+    info!(
+        log,
+        "Signing voluntary exit";
+        "validator" => pubkey_bytes.as_hex_string(),
+        "epoch" => epoch
+    );
+
+    let signed_voluntary_exit = validator_store
+        .sign_voluntary_exit(pubkey_bytes, voluntary_exit)
+        .await
+        .map_err(|e| {
+            warp_utils::reject::custom_server_error(format!(
+                "Failed to sign voluntary exit: {:?}",
+                e
+            ))
+        })?;
+
+    Ok(signed_voluntary_exit)
+}
+
+/// Calculates the current epoch from the genesis time and current time.
+fn get_current_epoch(slot_clock: T) -> Option { + slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) +} diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index a32ccce6279..f3107cfedbd 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -159,7 +159,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, gas_limit: request.gas_limit, builder_proposals: request.builder_proposals, voting_pubkey, - eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), + eth1_deposit_tx_data: serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, }); } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index ad76b27aa61..9dc7689731e 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,9 +1,11 @@ mod api_secret; +mod create_signed_voluntary_exit; mod create_validator; mod keystores; mod remotekeys; mod tests; +use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, @@ -75,6 +77,7 @@ pub struct Context { pub config: Config, pub log: Logger, pub sse_logging_components: Option, + pub slot_clock: T, pub _phantom: PhantomData, } @@ -193,6 +196,9 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_slot_clock = ctx.slot_clock.clone(); + let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); + let inner_spec = Arc::new(ctx.spec.clone()); let spec_filter = warp::any().map(move || inner_spec.clone()); @@ -912,6 +918,46 @@ pub fn serve( ) .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // POST /eth/v1/validator/{pubkey}/voluntary_exit + let post_validators_voluntary_exits = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("voluntary_exit")) + .and(warp::query::()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(slot_clock_filter) + .and(log_filter.clone()) + .and(signer.clone()) + .and(task_executor_filter.clone()) + .and_then( + |pubkey: PublicKey, + query: api_types::VoluntaryExitQuery, + validator_store: Arc>, + slot_clock: T, + log, + signer, + task_executor: TaskExecutor| { + blocking_signed_json_task(signer, move || { + if let Some(handle) = task_executor.handle() { + let signed_voluntary_exit = + handle.block_on(create_signed_voluntary_exit( + pubkey, + query.epoch, + validator_store, + slot_clock, + log, + ))?; + Ok(signed_voluntary_exit) + } else { + Err(warp_utils::reject::custom_server_error( + "Lighthouse shutting down".into(), + )) + } + }) + }, + ); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1053,6 +1099,7 @@ pub fn serve( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_validators_voluntary_exits) .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) @@ -1128,4 +1175,4 @@ where response }) -} +} \ No newline at end of file diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 6d629f01545..49fa7a4611f 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -45,6 +45,7 @@ struct ApiTester { initialized_validators: 
Arc>, validator_store: Arc>, url: SensitiveUrl, + slot_clock: TestingSlotClock, _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, _runtime_shutdown: exit_future::Signal, @@ -90,8 +91,12 @@ impl ApiTester { let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let slot_clock = - TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let genesis_time: u64 = 0; + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_time), + Duration::from_secs(1), + ); let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); @@ -101,9 +106,9 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), - slot_clock, + slot_clock.clone(), &config, executor.clone(), log.clone(), @@ -131,6 +136,7 @@ impl ApiTester { }, sse_logging_components: None, log, + slot_clock: slot_clock.clone(), _phantom: PhantomData, }); let ctx = context.clone(); @@ -157,6 +163,7 @@ impl ApiTester { initialized_validators, validator_store, url, + slot_clock, _server_shutdown: shutdown_tx, _validator_dir: validator_dir, _runtime_shutdown: runtime_shutdown, @@ -359,7 +366,7 @@ impl ApiTester { let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); let deposit_bytes = - eth2_serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) @@ -495,6 +502,33 @@ impl ApiTester { self } + pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + // manually setting validator index in `ValidatorStore` + self.initialized_validators + .write() + .set_index(&validator.voting_pubkey, 0); + + let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch()); + + let resp = self + .client + .post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch) + .await; + + assert!(resp.is_ok()); + assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch); + + self + } + + fn get_current_epoch(&self) -> Epoch { + self.slot_clock + .now() + .map(|s| s.epoch(E::slots_per_epoch())) + .unwrap() + } + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; @@ -779,6 +813,29 @@ fn hd_validator_creation() { }); } +#[test] +fn validator_exit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; + }); +} + #[test] fn validator_enabling() { let runtime = build_runtime(); @@ -924,4 +981,4 @@ fn web3signer_validator_creation() { .assert_enabled_validators_count(1) .assert_validators_count(1); }); -} +} \ No newline at end of file diff 
--git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index b4e400c3e72..8a52a4d35e9 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -88,6 +88,11 @@ lazy_static::lazy_static! { "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_voluntary_exits_total", + "Total count of VoluntaryExit signings", + &["status"] + ); pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( "builder_validator_registrations_total", "Total count of ValidatorRegistrationData signings", diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 7fe2f5f8ecd..468fc2b06b2 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -989,7 +989,23 @@ impl InitializedValidators { let cache = KeyCache::open_or_create(&self.validators_dir).map_err(Error::UnableToOpenKeyCache)?; - let mut key_cache = self.decrypt_key_cache(cache, &mut key_stores).await?; + + // Check if there is at least one local definition. + let has_local_definitions = self.definitions.as_slice().iter().any(|def| { + matches!( + def.signing_definition, + SigningDefinition::LocalKeystore { .. } + ) + }); + + // Only decrypt cache when there is at least one local definition. + // Decrypting cache is a very expensive operation which is never used for web3signer. + let mut key_cache = if has_local_definitions { + self.decrypt_key_cache(cache, &mut key_stores).await? + } else { + // Assign an empty KeyCache if all definitions are of the Web3Signer type. + KeyCache::new() + }; let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { @@ -1115,13 +1131,16 @@ impl InitializedValidators { ); } } - for uuid in disabled_uuids { - key_cache.remove(&uuid); + + if has_local_definitions { + for uuid in disabled_uuids { + key_cache.remove(&uuid); + } } let validators_dir = self.validators_dir.clone(); let log = self.log.clone(); - if key_cache.is_modified() { + if has_local_definitions && key_cache.is_modified() { tokio::task::spawn_blocking(move || { match key_cache.save(validators_dir) { Err(e) => warn!( diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 917dc645f9b..b965cfb1375 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -24,6 +24,7 @@ pub use config::Config; use initialized_validators::InitializedValidators; use lighthouse_metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; +use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ @@ -94,6 +95,7 @@ pub struct ProductionValidatorClient { doppelganger_service: Option>, preparation_service: PreparationService, validator_store: Arc>, + slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, } @@ -262,60 +264,70 @@ impl ProductionValidatorClient { .checked_sub(1) .ok_or_else(|| "No beacon nodes defined.".to_string())?; - let beacon_nodes: Vec = config - .beacon_nodes - .iter() - .enumerate() - .map(|(i, url)| { - let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot); + let beacon_node_setup = |x: (usize, &SensitiveUrl)| { + let i = x.0; + let url = x.1; + let slot_duration = 
Duration::from_secs(context.eth2_config.spec.seconds_per_slot); - let mut beacon_node_http_client_builder = ClientBuilder::new(); + let mut beacon_node_http_client_builder = ClientBuilder::new(); - // Add new custom root certificates if specified. - if let Some(certificates) = &config.beacon_nodes_tls_certs { - for cert in certificates { - beacon_node_http_client_builder = beacon_node_http_client_builder - .add_root_certificate(load_pem_certificate(cert)?); - } + // Add new custom root certificates if specified. + if let Some(certificates) = &config.beacon_nodes_tls_certs { + for cert in certificates { + beacon_node_http_client_builder = beacon_node_http_client_builder + .add_root_certificate(load_pem_certificate(cert)?); } + } - let beacon_node_http_client = beacon_node_http_client_builder - // Set default timeout to be the full slot duration. - .timeout(slot_duration) - .build() - .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; + let beacon_node_http_client = beacon_node_http_client_builder + // Set default timeout to be the full slot duration. + .timeout(slot_duration) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; - // Use quicker timeouts if a fallback beacon node exists. - let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { - info!( - log, - "Fallback endpoints are available, using optimized timeouts."; - ); - Timeouts { - attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, - attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, - liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, - proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, - proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, - sync_committee_contribution: slot_duration - / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, - sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, - get_beacon_blocks_ssz: slot_duration - / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, - get_debug_beacon_states: slot_duration - / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, - get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - } - } else { - Timeouts::set_all(slot_duration) - }; - - Ok(BeaconNodeHttpClient::from_components( - url.clone(), - beacon_node_http_client, - timeouts, - )) - }) + // Use quicker timeouts if a fallback beacon node exists. 
+ let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { + info!( + log, + "Fallback endpoints are available, using optimized timeouts."; + ); + Timeouts { + attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, + attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, + proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, + proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: slot_duration + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, + sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, + get_beacon_blocks_ssz: slot_duration + / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, + get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, + } + } else { + Timeouts::set_all(slot_duration) + }; + + Ok(BeaconNodeHttpClient::from_components( + url.clone(), + beacon_node_http_client, + timeouts, + )) + }; + + let beacon_nodes: Vec = config + .beacon_nodes + .iter() + .enumerate() + .map(beacon_node_setup) + .collect::, String>>()?; + + let proposer_nodes: Vec = config + .proposer_nodes + .iter() + .enumerate() + .map(beacon_node_setup) .collect::, String>>()?; let num_nodes = beacon_nodes.len(); @@ -324,6 +336,12 @@ impl ProductionValidatorClient { .map(CandidateBeaconNode::new) .collect(); + let proposer_nodes_num = proposer_nodes.len(); + let proposer_candidates = proposer_nodes + .into_iter() + .map(CandidateBeaconNode::new) + .collect(); + // Set the count for beacon node fallbacks excluding the primary beacon node. set_gauge( &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED, @@ -348,9 +366,16 @@ impl ProductionValidatorClient { log.clone(), ); + let mut proposer_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new( + proposer_candidates, + config.disable_run_on_all, + context.eth2_config.spec.clone(), + log.clone(), + ); + // Perform some potentially long-running initialization tasks. let (genesis_time, genesis_validators_root) = tokio::select! { - tuple = init_from_beacon_node(&beacon_nodes, &context) => tuple?, + tuple = init_from_beacon_node(&beacon_nodes, &proposer_nodes, &context) => tuple?, () = context.executor.exit() => return Err("Shutting down".to_string()) }; @@ -366,9 +391,14 @@ impl ProductionValidatorClient { ); beacon_nodes.set_slot_clock(slot_clock.clone()); + proposer_nodes.set_slot_clock(slot_clock.clone()); + let beacon_nodes = Arc::new(beacon_nodes); start_fallback_updater_service(context.clone(), beacon_nodes.clone())?; + let proposer_nodes = Arc::new(proposer_nodes); + start_fallback_updater_service(context.clone(), proposer_nodes.clone())?; + let doppelganger_service = if config.enable_doppelganger_protection { Some(Arc::new(DoppelgangerService::new( context @@ -432,15 +462,21 @@ impl ProductionValidatorClient { ctx.shared.write().duties_service = Some(duties_service.clone()); } - let block_service = BlockServiceBuilder::new() + let mut block_service_builder = BlockServiceBuilder::new() .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .block_delay(config.block_delay) - .build()?; + .block_delay(config.block_delay); + + // If we have proposer nodes, add them to the block service builder. 
+        if proposer_nodes_num > 0 {
+            block_service_builder = block_service_builder.proposer_nodes(proposer_nodes.clone());
+        }
+
+        let block_service = block_service_builder.build()?;
 
         let attestation_service = AttestationServiceBuilder::new()
             .duties_service(duties_service.clone())
@@ -461,7 +497,7 @@
         let sync_committee_service = SyncCommitteeService::new(
             duties_service.clone(),
             validator_store.clone(),
-            slot_clock,
+            slot_clock.clone(),
             beacon_nodes.clone(),
             context.service_context("sync_committee".into()),
         );
@@ -482,6 +518,7 @@
             preparation_service,
             validator_store,
             config,
+            slot_clock,
             http_api_listen_addr: None,
         })
     }
@@ -545,6 +582,7 @@
             spec: self.context.eth2_config.spec.clone(),
             config: self.config.http_api.clone(),
             sse_logging_components: self.context.sse_logging_components.clone(),
+            slot_clock: self.slot_clock.clone(),
             log: log.clone(),
             _phantom: PhantomData,
         });
@@ -579,13 +617,32 @@
 async fn init_from_beacon_node<T: EthSpec>(
     beacon_nodes: &BeaconNodeFallback<SystemTimeSlotClock, T>,
+    proposer_nodes: &BeaconNodeFallback<SystemTimeSlotClock, T>,
     context: &RuntimeContext<T>,
 ) -> Result<(u64, Hash256), String> {
     loop {
         beacon_nodes.update_unready_candidates().await;
+        proposer_nodes.update_unready_candidates().await;
+
         let num_available = beacon_nodes.num_available().await;
         let num_total = beacon_nodes.num_total();
-        if num_available > 0 {
+
+        let proposer_available = proposer_nodes.num_available().await;
+        let proposer_total = proposer_nodes.num_total();
+
+        if proposer_total > 0 && proposer_available == 0 {
+            warn!(
+                context.log(),
+                "Unable to connect to a proposer node";
+                "retry in" => format!("{} seconds", RETRY_DELAY.as_secs()),
+                "total_proposers" => proposer_total,
+                "available_proposers" => proposer_available,
+                "total_beacon_nodes" => num_total,
+                "available_beacon_nodes" => num_available,
+            );
+        }
+
+        if num_available > 0 && proposer_available == 0 {
             info!(
                 context.log(),
                 "Initialized beacon node connections";
                 "total" => num_total,
                 "available" => num_available,
             );
             break;
+        } else if num_available > 0 {
+            info!(
+                context.log(),
+                "Initialized beacon node connections";
+                "total" => num_total,
+                "available" => num_available,
+                "proposers_available" => proposer_available,
+                "proposers_total" => proposer_total,
+            );
+            break;
         } else {
             warn!(
                 context.log(),
@@ -771,4 +838,4 @@ pub fn determine_graffiti(
         })
         .or(validator_definition_graffiti)
         .or(graffiti_flag)
-}
+}
\ No newline at end of file
diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs
index ae9df080965..0de2f2f54fa 100644
--- a/validator_client/src/signing_method.rs
+++ b/validator_client/src/signing_method.rs
@@ -47,6 +47,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
     },
     SignedContributionAndProof(&'a ContributionAndProof<T>),
     ValidatorRegistration(&'a ValidatorRegistrationData),
+    VoluntaryExit(&'a VoluntaryExit),
 }
 
 impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Payload> {
@@ -67,6 +68,7 @@
             } => beacon_block_root.signing_root(domain),
             SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain),
             SignableMessage::ValidatorRegistration(v) => v.signing_root(domain),
+            SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain),
         }
     }
 }
@@ -203,6 +205,7 @@ impl SigningMethod {
             SignableMessage::ValidatorRegistration(v) => {
Web3SignerObject::ValidatorRegistration(v) } + SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), }; // Determine the Web3Signer message type. diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 17e780304e1..2c1f0cb3fc6 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -54,15 +54,14 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { Deposit { pubkey: PublicKeyBytes, withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] amount: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], }, RandaoReveal { epoch: Epoch, }, - #[allow(dead_code)] VoluntaryExit(&'a VoluntaryExit), SyncCommitteeMessage { beacon_block_root: Hash256, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 36a0d057342..73843579a2b 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -22,8 +22,9 @@ use types::{ AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -155,6 +156,14 @@ impl ValidatorStore { self.validators.clone() } + /// Indicates if the `voting_public_key` exists in self and is enabled. + pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool { + self.validators + .read() + .validator(voting_public_key) + .is_some() + } + /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. 
 #[allow(clippy::too_many_arguments)]
@@ -616,6 +625,32 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         }
     }
 
+    pub async fn sign_voluntary_exit(
+        &self,
+        validator_pubkey: PublicKeyBytes,
+        voluntary_exit: VoluntaryExit,
+    ) -> Result<SignedVoluntaryExit, Error> {
+        let signing_epoch = voluntary_exit.epoch;
+        let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch);
+        let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?;
+
+        let signature = signing_method
+            .get_signature::<VoluntaryExit, BlindedPayload<E>>(
+                SignableMessage::VoluntaryExit(&voluntary_exit),
+                signing_context,
+                &self.spec,
+                &self.task_executor,
+            )
+            .await?;
+
+        metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]);
+
+        Ok(SignedVoluntaryExit {
+            message: voluntary_exit,
+            signature,
+        })
+    }
+
     pub async fn sign_validator_registration_data(
         &self,
         validator_registration_data: ValidatorRegistrationData,
diff --git a/watch/.gitignore b/watch/.gitignore
new file mode 100644
index 00000000000..5b6b0720c9e
--- /dev/null
+++ b/watch/.gitignore
@@ -0,0 +1 @@
+config.yaml
diff --git a/watch/Cargo.toml b/watch/Cargo.toml
new file mode 100644
index 00000000000..d1793a9d068
--- /dev/null
+++ b/watch/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "watch"
+version = "0.1.0"
+edition = "2018"
+
+[lib]
+name = "watch"
+path = "src/lib.rs"
+
+[[bin]]
+name = "watch"
+path = "src/main.rs"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+clap = "2.33.3"
+log = "0.4.14"
+env_logger = "0.9.0"
+types = { path = "../consensus/types" }
+eth2 = { path = "../common/eth2" }
+beacon_node = { path = "../beacon_node"}
+tokio = { version = "1.14.0", features = ["time"] }
+axum = "0.5.15"
+hyper = "0.14.20"
+serde = "1.0.116"
+serde_json = "1.0.58"
+reqwest = { version = "0.11.0", features = ["json","stream"] }
+url = "2.2.2"
+rand = "0.7.3"
+diesel = { version = "2.0.2", features = ["postgres", "r2d2"] }
+diesel_migrations = { version = "2.0.0", features = ["postgres"] }
+byteorder = "1.4.3"
+bls = { path = "../crypto/bls" }
+hex = "0.4.2"
+r2d2 = "0.8.9"
+serde_yaml = "0.8.24"
+
+[dev-dependencies]
+tokio-postgres = "0.7.5"
+http_api = { path = "../beacon_node/http_api" }
+beacon_chain = { path = "../beacon_node/beacon_chain" }
+network = { path = "../beacon_node/network" }
+testcontainers = "0.14.0"
+unused_port = { path = "../common/unused_port" }
diff --git a/watch/README.md b/watch/README.md
new file mode 100644
index 00000000000..18bf393946d
--- /dev/null
+++ b/watch/README.md
@@ -0,0 +1,460 @@
+## beacon.watch
+
+>beacon.watch is pre-MVP and still under active development and subject to change.
+
+beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to
+data which is:
+1. Not already stored natively in the Beacon Chain
+2. Too specialized for Block Explorers
+3. Too sensitive for public Block Explorers
+
+
+### Requirements
+- `git`
+- `rust` : https://rustup.rs/
+- `libpq` : https://www.postgresql.org/download/
+- `diesel_cli` :
+```
+cargo install diesel_cli --no-default-features --features postgres
+```
+- `docker` : https://docs.docker.com/engine/install/
+- `docker-compose` : https://docs.docker.com/compose/install/
+
+### Setup
+1. Setup the database:
+```
+cd postgres_docker_compose
+docker-compose up
+```
+
+1. Ensure the tests pass:
+```
+cargo test --release
+```
+
+1. Drop the database (if it already exists) and run the required migrations:
+```
+diesel database reset --database-url postgres://postgres:postgres@localhost/dev
+```
+
+1. Ensure a synced Lighthouse beacon node with historical states is available
+at `localhost:5052`.
+The smaller the value of `--slots-per-restore-point` the faster beacon.watch
+will be able to sync to the beacon node.
+
+1. Run the updater daemon:
+```
+cargo run --release -- run-updater
+```
+
+1. Start the HTTP API server:
+```
+cargo run --release -- serve
+```
+
+1. Ensure connectivity:
+```
+curl "http://localhost:5059/v1/slots/highest"
+```
+
+> Functionality on MacOS has not been tested. Windows is not supported.
+
+
+### Configuration
+beacon.watch can be configured through the use of a config file.
+Available options can be seen in `config.yaml.default`.
+
+You can specify a config file during runtime:
+```
+cargo run -- run-updater --config path/to/config.yaml
+cargo run -- serve --config path/to/config.yaml
+```
+
+You can specify only the parts of the config file which you need changed.
+Missing values will remain as their defaults.
+
+For example, if you wish to run with default settings but only want to alter `log_level`,
+your config file would be:
+```yaml
+# config.yaml
+log_level: "info"
+```
+
+### Available Endpoints
+As beacon.watch continues to develop, more endpoints will be added.
+
+> In these examples any data containing information from blockprint has either been redacted or fabricated.
+
+#### `/v1/slots/{slot}`
+```bash
+curl "http://localhost:5059/v1/slots/4635296"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "skipped": false,
+    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+}
+```
+
+#### `/v1/slots?start_slot={}&end_slot={}`
+```bash
+curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297"
+```
+```json
+[
+    {
+        "slot": "4635297",
+        "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
+        "skipped": false,
+        "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182"
+    },
+    {
+        "slot": "4635296",
+        "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+        "skipped": false,
+        "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+    }
+]
+```
+
+#### `/v1/slots/lowest`
+```bash
+curl "http://localhost:5059/v1/slots/lowest"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "skipped": false,
+    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+}
+```
+
+#### `/v1/slots/highest`
+```bash
+curl "http://localhost:5059/v1/slots/highest"
+```
+```json
+{
+    "slot": "4635358",
+    "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
+    "skipped": false,
+    "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b"
+}
+```
+
+#### `/v1/slots/{slot}/block`
+```bash
+curl "http://localhost:5059/v1/slots/4635296/block"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
+}
+```
+
+#### `/v1/blocks/{block_id}`
+```bash
+curl "http://localhost:5059/v1/blocks/4635296"
+# OR
+curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+```
+```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" + } +] +``` + +#### `/v1/blocks/{block_id}/previous` +```bash +curl "http://localhost:5059/v1/blocks/4635297/previous" +# OR +curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}/next` +```bash +curl "http://localhost:5059/v1/blocks/4635296/next" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" +``` +```json +{ + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/blocks/lowest` +```bash +curl "http://localhost:5059/v1/blocks/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/highest` +```bash +curl "http://localhost:5059/v1/blocks/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" +} +``` + +#### `/v1/blocks/{block_id}/proposer` +```bash +curl "http://localhost:5059/v1/blocks/4635296/proposer" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" + +``` +```json +{ + "slot": "4635296", + "proposer_index": 223126, + "graffiti": "" +} +``` + +#### `/v1/blocks/{block_id}/rewards` +```bash +curl "http://localhost:5059/v1/blocks/4635296/reward" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" + +``` +```json +{ + "slot": "4635296", + "total": 25380059, + "attestation_reward": 24351867, + "sync_committee_reward": 1028192 +} +``` + +#### `/v1/blocks/{block_id}/packing` +```bash +curl "http://localhost:5059/v1/blocks/4635296/packing" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" + +``` +```json +{ + "slot": "4635296", + "available": 16152, + "included": 13101, + "prior_skip_slots": 0 +} +``` + +#### `/v1/validators/{validator}` +```bash +curl "http://localhost:5059/v1/validators/1" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" +``` +```json +{ + "index": 1, + "public_key": 
"0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "status": "active_ongoing", + "client": null, + "activation_epoch": 0, + "exit_epoch": null +} +``` + +#### `/v1/validators/{validator}/attestation/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/1/attestation/144853" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" +``` +```json +{ + "index": 1, + "epoch": "144853", + "source": true, + "head": true, + "target": true +} +``` + +#### `/v1/validators/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853" +``` +```json +[ + 63, + 67, + 98, + ... +] +``` + +#### `/v1/validators/missed/{vote}/{epoch}/graffiti` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" +``` +```json +{ + "Mr F was here": 3, + "Lighthouse/v3.1.0-aa022f4": 5, + ... +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/clients/missed/source/144853" +``` +```json +{ + "Lighthouse": 100, + "Lodestar": 100, + "Nimbus": 100, + "Prysm": 100, + "Teku": 100, + "Unknown": 100 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages` +Note that this endpoint expresses the following: +``` +What percentage of each client implementation missed this vote? +``` + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" +``` +```json +{ + "Lighthouse": 0.51234567890, + "Lodestar": 0.51234567890, + "Nimbus": 0.51234567890, + "Prysm": 0.09876543210, + "Teku": 0.09876543210, + "Unknown": 0.05647382910 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` +Note that this endpoint expresses the following: +``` +For the validators which did miss this vote, what percentage of them were from each client implementation? +``` +You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" +``` +```json +{ + "Lighthouse": 11.11111111111111, + "Lodestar": 11.11111111111111, + "Nimbus": 11.11111111111111, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 33.33333333333333 +} + +``` + +#### `/v1/clients` +```bash +curl "http://localhost:5059/v1/clients" +``` +```json +{ + "Lighthouse": 5000, + "Lodestar": 5000, + "Nimbus": 5000, + "Prysm": 5000, + "Teku": 5000, + "Unknown": 5000 +} +``` + +#### `/v1/clients/percentages` +```bash +curl "http://localhost:5059/v1/clients/percentages" +``` +```json +{ + "Lighthouse": 16.66666666666667, + "Lodestar": 16.66666666666667, + "Nimbus": 16.66666666666667, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 16.66666666666667 +} +``` + +### Future work +- New tables + - `skip_slots`? + + +- More API endpoints + - `/v1/proposers?start_epoch={}&end_epoch={}` and similar + - `/v1/validators/{status}/count` + + +- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. + + +- Better/prettier (async?) logging. + + +- Connect to a range of beacon_nodes to sync different components concurrently. +Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. 
+ + +### Architecture +Connection Pooling: +- 1 Pool for Updater (read and write) +- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default new file mode 100644 index 00000000000..131609237cb --- /dev/null +++ b/watch/config.yaml.default @@ -0,0 +1,49 @@ +--- +database: + user: "postgres" + password: "postgres" + dbname: "dev" + default_dbname: "postgres" + host: "localhost" + port: 5432 + connect_timeout_millis: 2000 + +server: + listen_addr: "127.0.0.1" + listen_port: 5059 + +updater: + # The URL of the Beacon Node to perform sync tasks with. + # Cannot yet accept multiple beacon nodes. + beacon_node_url: "http://localhost:5052" + # The number of epochs to backfill. Must be below 100. + max_backfill_size_epochs: 2 + # The epoch at which to stop backfilling. + backfill_stop_epoch: 0 + # Whether to sync the attestations table. + attestations: true + # Whether to sync the proposer_info table. + proposer_info: true + # Whether to sync the block_rewards table. + block_rewards: true + # Whether to sync the block_packing table. + block_packing: true + +blockprint: + # Whether to sync client information from blockprint. + enabled: false + # The URL of the blockprint server. + url: "" + # The username used to authenticate to the blockprint server. + username: "" + # The password used to authenticate to the blockprint server. + password: "" + +# Log level. +# Valid options are: +# - "trace" +# - "debug" +# - "info" +# - "warn" +# - "error" +log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml new file mode 100644 index 00000000000..bfb01bccf0f --- /dev/null +++ b/watch/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 00000000000..a9f52609119 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 00000000000..d68895b1a7b --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql new file mode 100644 index 00000000000..551ed6605c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql @@ -0,0 +1 @@ +DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql new file mode 100644 index 00000000000..2629f11a4c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE canonical_slots ( + slot integer PRIMARY KEY, + root bytea NOT NULL, + skipped boolean NOT NULL, + beacon_block bytea UNIQUE +) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql new file mode 100644 index 00000000000..8901956f47c --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql @@ -0,0 +1 @@ +DROP TABLE beacon_blocks diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql new file mode 100644 index 00000000000..250c667b232 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE beacon_blocks ( + slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, + root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, + parent_root bytea NOT NULL, + attestation_count integer NOT NULL, + transaction_count integer +) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql new file mode 100644 index 00000000000..17819fc3491 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/down.sql @@ -0,0 +1 @@ +DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql new file mode 100644 index 00000000000..69cfef6772b --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE validators ( + index integer PRIMARY KEY, + public_key bytea NOT NULL, + status text NOT NULL, + activation_epoch integer, + exit_epoch integer +) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql new file mode 100644 index 00000000000..d61330be5b2 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/down.sql @@ -0,0 +1 @@ +DROP TABLE proposer_info diff --git 
a/watch/migrations/2022-01-01-000003_proposer_info/up.sql b/watch/migrations/2022-01-01-000003_proposer_info/up.sql new file mode 100644 index 00000000000..488aedb2730 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE proposer_info ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, + graffiti text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql new file mode 100644 index 00000000000..b4304eb7b72 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/down.sql @@ -0,0 +1 @@ +DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql new file mode 100644 index 00000000000..476a0911607 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE active_config ( + id integer PRIMARY KEY CHECK (id=1), + config_name text NOT NULL, + slots_per_epoch integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql new file mode 100644 index 00000000000..fa53325dad1 --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/down.sql @@ -0,0 +1 @@ +DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql new file mode 100644 index 00000000000..2d5741f50b7 --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/up.sql @@ -0,0 +1,4 @@ +CREATE TABLE blockprint ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + best_guess text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql b/watch/migrations/2022-01-01-000011_block_rewards/down.sql new file mode 100644 index 00000000000..2dc87995c74 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/down.sql @@ -0,0 +1 @@ +DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql new file mode 100644 index 00000000000..47cb4304f06 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_rewards ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + total integer NOT NULL, + attestation_reward integer NOT NULL, + sync_committee_reward integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql new file mode 100644 index 00000000000..e9e7755e3e0 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/down.sql @@ -0,0 +1 @@ +DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql new file mode 100644 index 00000000000..63a9925f920 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_packing ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + available integer NOT NULL, + included integer NOT NULL, + prior_skip_slots integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql 
b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql new file mode 100644 index 00000000000..0f32b6b4f33 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql @@ -0,0 +1 @@ +DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql new file mode 100644 index 00000000000..5352afefc8d --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE suboptimal_attestations ( + epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, + index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, + source boolean NOT NULL, + head boolean NOT NULL, + target boolean NOT NULL, + PRIMARY KEY(epoch_start_slot, index) +) diff --git a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql new file mode 100644 index 00000000000..5903b351db9 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/down.sql @@ -0,0 +1,2 @@ +ALTER TABLE beacon_blocks +DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql new file mode 100644 index 00000000000..b52b4b00998 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/up.sql @@ -0,0 +1,3 @@ +ALTER TABLE beacon_blocks +ADD COLUMN withdrawal_count integer; + diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml new file mode 100644 index 00000000000..eae4de4a2ba --- /dev/null +++ b/watch/postgres_docker_compose/compose.yml @@ -0,0 +1,16 @@ +version: "3" + +services: + postgres: + image: postgres:12.3-alpine + restart: always + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + volumes: + - postgres:/var/lib/postgresql/data + ports: + - 127.0.0.1:5432:5432 + +volumes: + postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs new file mode 100644 index 00000000000..f7375431cb3 --- /dev/null +++ b/watch/src/block_packing/database.rs @@ -0,0 +1,140 @@ +use crate::database::{ + schema::{beacon_blocks, block_packing}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_packing)] +pub struct WatchBlockPacking { + pub slot: WatchSlot, + pub available: i32, + pub included: i32, + pub prior_skip_slots: i32, +} + +/// Insert a batch of values into the `block_packing` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_packing( + conn: &mut PgConn, + packing: Vec, +) -> Result<(), Error> { + use self::block_packing::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_packing) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_packing` table where `slot` is minimum. 
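+/// +/// Returns `Ok(None)` when the `block_packing` table is empty.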
+pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.asc()) + .limit(1) + .first::<WatchBlockPacking>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_packing` table where `slot` is maximum. +pub fn get_highest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.desc()) + .limit(1) + .first::<WatchBlockPacking>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. +pub fn get_block_packing_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result<Option<WatchBlockPacking>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_packing); + + let result = join + .select((slot, available, included, prior_skip_slots)) + .filter(root.eq(root_query)) + .first::<WatchBlockPacking>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. +pub fn get_block_packing_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result<Option<WatchBlockPacking>, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .filter(slot.eq(slot_query)) + .first::<WatchBlockPacking>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_packing`. +#[allow(dead_code)] +pub fn get_unknown_block_packing( + conn: &mut PgConn, + slots_per_epoch: u64, +) -> Result<Vec<Option<WatchSlot>>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_packing::dsl::block_packing; + + let join = beacon_blocks.left_join(block_packing); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block packing cannot be retrieved for epoch 0 so we need to exclude them. + .filter(slot.ge(slots_per_epoch as i32)) + .order_by(slot.desc()) + .nullable() + .load::<Option<WatchSlot>>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs new file mode 100644 index 00000000000..5d74fc59799 --- /dev/null +++ b/watch/src/block_packing/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; +pub use server::block_packing_routes; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/block_packing`. +/// Formats the response into a vector of `WatchBlockPacking`. +/// +/// Will fail if `start_epoch == 0`.
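+/// +/// Hypothetical usage (editor's sketch; `bn` is any `BeaconNodeHttpClient` pointed at a synced node): +/// ```ignore +/// let packing = get_block_packing(&bn, Epoch::new(1), Epoch::new(4)).await?; +/// ```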
+pub async fn get_block_packing( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, +) -> Result<Vec<WatchBlockPacking>, Error> { + Ok(bn + .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) + .await? + .into_iter() + .map(|data| WatchBlockPacking { + slot: WatchSlot::from_slot(data.slot), + available: data.available_attestations as i32, + included: data.included_attestations as i32, + prior_skip_slots: data.prior_skip_slots as i32, + }) + .collect()) +} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs new file mode 100644 index 00000000000..819144562a5 --- /dev/null +++ b/watch/src/block_packing/server.rs @@ -0,0 +1,31 @@ +use crate::block_packing::database::{ + get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_packing( + Path(block_query): Path<String>, + Extension(pool): Extension<PgPool>, +) -> Result<Json<Option<WatchBlockPacking>>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_block_packing_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_packing_routes() -> Router { + Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) +} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs new file mode 100644 index 00000000000..215964901a6 --- /dev/null +++ b/watch/src/block_packing/updater.rs @@ -0,0 +1,211 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_packing::get_block_packing; + +use eth2::types::{Epoch, EthSpec}; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; + +impl<T: EthSpec> UpdateHandler<T> { + /// Forward fills the `block_packing` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_packing` API with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) + /// `end_epoch` -> epoch of highest beacon block + /// + /// It will resync the latest epoch if it is not fully filled. + /// That is, `if highest_filled_slot % slots_per_epoch != 31` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + /// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn fill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_packing` table. + let highest_filled_slot_opt = if self.config.block_packing { + database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot.as_slot() % self.slots_per_epoch + == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one.
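+ // (Editor's example: with 32 slots per epoch, a highest filled slot of 95 gives + // 95 % 32 == 31, so epoch 2 (slots 64-95) is complete and syncing resumes from + // epoch 3; a highest filled slot of 90 would instead resync epoch 2 from its start.)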
+ highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { + lowest_beacon_block + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not fill the `block_packing` table. + warn!("Refusing to fill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Block packing is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Since we pull a full epoch of data but are not guaranteed to have all blocks of + // that epoch available, only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_packing` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_block_packing` function with: + /// `start_epoch` -> epoch of lowest_beacon_block + /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_packing_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `block_packing` table. 
+ let lowest_filled_slot_opt = if self.config.block_packing { + database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { + lowest_filled_slot + .as_slot() + .epoch(self.slots_per_epoch) + .saturating_sub(Epoch::new(1)) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot().epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not backfill the `block_packing` table. + warn!("Refusing to backfill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_epoch <= 1 { + debug!("Block packing backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch >= end_epoch { + debug!("Block packing is up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_packing_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { + start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) + } + + // The `block_packing` API cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs new file mode 100644 index 00000000000..a2bf49f3e4d --- /dev/null +++ b/watch/src/block_rewards/database.rs @@ -0,0 +1,137 @@ +use crate::database::{ + schema::{beacon_blocks, block_rewards}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_rewards)] +pub struct WatchBlockRewards { + pub slot: WatchSlot, + pub total: i32, + pub attestation_reward: i32, + pub sync_committee_reward: i32, +} + +/// Insert a batch of values into the `block_rewards` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_rewards( + conn: &mut PgConn, + rewards: Vec, +) -> Result<(), Error> { + use self::block_rewards::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_rewards) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_rewards` table where `slot` is minimum. +pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_rewards` table where `slot` is maximum. +pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. +pub fn get_block_rewards_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_rewards); + + let result = join + .select((slot, total, attestation_reward, sync_committee_reward)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. 
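+/// +/// Returns `Ok(None)` when no row matches `slot_query`.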
+pub fn get_block_rewards_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_rewards`. +#[allow(dead_code)] +pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_rewards::dsl::block_rewards; + + let join = beacon_blocks.left_join(block_rewards); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. + .filter(slot.ne(0)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs new file mode 100644 index 00000000000..0dac88ea58d --- /dev/null +++ b/watch/src/block_rewards/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +mod server; +mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; +pub use server::block_rewards_routes; + +use eth2::BeaconNodeHttpClient; +use types::Slot; + +/// Sends a request to `lighthouse/analysis/block_rewards`. +/// Formats the response into a vector of `WatchBlockRewards`. +/// +/// Will fail if `start_slot == 0`. +pub async fn get_block_rewards( + bn: &BeaconNodeHttpClient, + start_slot: Slot, + end_slot: Slot, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_rewards(start_slot, end_slot) + .await? + .into_iter() + .map(|data| WatchBlockRewards { + slot: WatchSlot::from_slot(data.meta.slot), + total: data.total as i32, + attestation_reward: data.attestation_rewards.total as i32, + sync_committee_reward: data.sync_committee_rewards as i32, + }) + .collect()) +} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs new file mode 100644 index 00000000000..480346e25b3 --- /dev/null +++ b/watch/src/block_rewards/server.rs @@ -0,0 +1,31 @@ +use crate::block_rewards::database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_rewards( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_rewards_routes() -> Router { + Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) +} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs new file mode 100644 index 00000000000..ad34b1f0785 --- /dev/null +++ b/watch/src/block_rewards/updater.rs @@ -0,0 +1,157 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_rewards::get_block_rewards; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `block_rewards` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_rewards` table. + let highest_filled_slot_opt = if self.config.block_rewards { + database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `block_rewards` table. + warn!("Refusing to fill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Block rewards are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + database::insert_batch_block_rewards(&mut conn, rewards)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_rewards` tables starting from the entry with the + /// lowest slot. 
+ /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `block_rewards` table. + let lowest_filled_slot_opt = if self.config.block_rewards { + database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `block_rewards` table. + warn!("Refusing to backfill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Block rewards backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Block rewards are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_reward_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { + start_slot = end_slot.saturating_sub(max_block_reward_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) + } + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + + if self.config.block_rewards { + database::insert_batch_block_rewards(&mut conn, rewards)?; + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs new file mode 100644 index 00000000000..721fa7cb197 --- /dev/null +++ b/watch/src/blockprint/config.rs @@ -0,0 +1,40 @@ +use serde::{Deserialize, Serialize}; + +pub const fn enabled() -> bool { + false +} + +pub const fn url() -> Option { + None +} + +pub const fn username() -> Option { + None +} + +pub const fn password() -> Option { + None +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "enabled")] + pub enabled: bool, + #[serde(default = "url")] + pub url: Option, + #[serde(default = "username")] + pub username: Option, + #[serde(default = "password")] + pub password: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: enabled(), + url: url(), + username: username(), + password: password(), + } + } +} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs new file mode 100644 index 00000000000..afa35c81b63 --- /dev/null +++ b/watch/src/blockprint/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + self, + schema::{beacon_blocks, blockprint}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::sql_types::{Integer, Text}; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Instant; + +type WatchConsensusClient = String; +pub fn list_consensus_clients() -> Vec { + vec![ + "Lighthouse".to_string(), + "Lodestar".to_string(), + "Nimbus".to_string(), + "Prysm".to_string(), + "Teku".to_string(), + "Unknown".to_string(), + ] +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = blockprint)] +pub struct WatchBlockprint { + pub slot: WatchSlot, + pub best_guess: WatchConsensusClient, +} + +#[derive(Debug, QueryableByName, diesel::FromSqlRow)] +pub struct WatchValidatorBlockprint { + #[diesel(sql_type = Integer)] + pub proposer_index: i32, + #[diesel(sql_type = Text)] + pub best_guess: WatchConsensusClient, + #[diesel(sql_type = Integer)] + pub slot: WatchSlot, +} + +/// Insert a batch of values into the `blockprint` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_blockprint( + conn: &mut PgConn, + prints: Vec, +) -> Result<(), Error> { + use self::blockprint::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(blockprint) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `blockprint` table where `slot` is minimum. +pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `blockprint` table where `slot` is maximum. 
+pub fn get_highest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.desc()) + .limit(1) + .first::<WatchBlockprint>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `root_query`. +pub fn get_blockprint_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result<Option<WatchBlockprint>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(blockprint); + + let result = join + .select((slot, best_guess)) + .filter(root.eq(root_query)) + .first::<WatchBlockprint>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`. +pub fn get_blockprint_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result<Option<WatchBlockprint>, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .filter(slot.eq(slot_query)) + .first::<WatchBlockprint>(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `blockprint`. +#[allow(dead_code)] +pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::blockprint::dsl::blockprint; + + let join = beacon_blocks.left_join(blockprint); + + let result = join + .select(slot) + .filter(root.is_null()) + .order_by(slot.desc()) + .nullable() + .load::<Option<WatchSlot>>(conn)?; + + Ok(result) +} + +/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before +/// `target_slot`. +/// Inserts `"Unknown"` if no prior proposals exist. +pub fn construct_validator_blockprints_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result<HashMap<i32, WatchConsensusClient>, Error> { + use self::blockprint::dsl::{blockprint, slot}; + + let total_validators = + database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? + as usize; + + let mut blockprint_map = HashMap::with_capacity(total_validators); + + let latest_proposals = + database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; + + let latest_proposal_slots: Vec<WatchSlot> = latest_proposals.clone().into_keys().collect(); + + let result = blockprint + .filter(slot.eq_any(latest_proposal_slots)) + .load::<WatchBlockprint>(conn)?; + + // Insert the validators which have available blockprints. + for print in result { + if let Some(proposer) = latest_proposals.get(&print.slot) { + blockprint_map.insert(*proposer, print.best_guess); + } + } + + // Insert the rest of the unknown validators. + for validator_index in 0..total_validators { + blockprint_map + .entry(validator_index as i32) + .or_insert_with(|| "Unknown".to_string()); + } + + Ok(blockprint_map) +} + +/// Counts the number of occurrences of each `client` present in the `validators` table at or before some +/// `target_slot`.
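+/// +/// Every client returned by `list_consensus_clients` appears in the result, with a count of zero +/// when no validators match it.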
+pub fn get_validators_clients_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + let mut client_map: HashMap = HashMap::new(); + + // This includes all validators which were activated at or before `target_slot`. + let validator_blockprints = + construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; + + for client in list_consensus_clients() { + let count = validator_blockprints + .iter() + .filter(|(_, v)| (*v).clone() == client) + .count(); + client_map.insert(client, count); + } + + Ok(client_map) +} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs new file mode 100644 index 00000000000..b8107e5bf58 --- /dev/null +++ b/watch/src/blockprint/mod.rs @@ -0,0 +1,149 @@ +pub mod database; +pub mod server; +pub mod updater; + +mod config; + +use crate::database::WatchSlot; + +use eth2::SensitiveUrl; +use reqwest::{Client, Response, Url}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use types::Slot; + +pub use config::Config; +pub use database::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + list_consensus_clients, WatchBlockprint, +}; +pub use server::blockprint_routes; + +const TIMEOUT: Duration = Duration::from_secs(50); + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), + BlockprintNotSynced, + Other(String), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchBlockprintClient { + pub client: Client, + pub server: SensitiveUrl, + pub username: Option, + pub password: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintSyncingResponse { + pub greatest_block_slot: Slot, + pub synced: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintResponse { + pub proposer_index: i32, + pub slot: Slot, + pub best_guess_single: String, +} + +impl WatchBlockprintClient { + async fn get(&self, url: Url) -> Result { + let mut builder = self.client.get(url).timeout(TIMEOUT); + if let Some(username) = &self.username { + builder = builder.basic_auth(username, self.password.as_ref()); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + + if !response.status().is_success() { + return Err(Error::Other(response.text().await?)); + } + + Ok(response) + } + + // Returns the `greatest_block_slot` as reported by the Blockprint server. + // Will error if the Blockprint server is not synced. + #[allow(dead_code)] + pub async fn ensure_synced(&self) -> Result { + let url = self.server.full.join("sync/")?.join("status")?; + + let response = self.get(url).await?; + + let result = response.json::().await?; + if !result.synced { + return Err(Error::BlockprintNotSynced); + } + + Ok(result.greatest_block_slot) + } + + // Pulls the latest blockprint for all validators. + #[allow(dead_code)] + pub async fn blockprint_all_validators( + &self, + highest_validator: i32, + ) -> Result, Error> { + let url = self + .server + .full + .join("validator/")? + .join("blocks/")? 
+ .join("latest")?; + + let response = self.get(url).await?; + + let mut result = response.json::<Vec<BlockprintResponse>>().await?; + result.retain(|print| print.proposer_index <= highest_validator); + + let mut map: HashMap<i32, String> = HashMap::with_capacity(result.len()); + for print in result { + map.insert(print.proposer_index, print.best_guess_single); + } + + Ok(map) + } + + // Construct a request to the Blockprint server for a range of slots between `start_slot` and + // `end_slot`. + pub async fn get_blockprint( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result<Vec<WatchBlockprint>, Error> { + let url = self + .server + .full + .join("blocks/")? + .join(&format!("{start_slot}/{end_slot}"))?; + + let response = self.get(url).await?; + + let result = response + .json::<Vec<BlockprintResponse>>() + .await? + .iter() + .map(|response| WatchBlockprint { + slot: WatchSlot::from_slot(response.slot), + best_guess: response.best_guess_single.clone(), + }) + .collect(); + Ok(result) + } +} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs new file mode 100644 index 00000000000..488af157174 --- /dev/null +++ b/watch/src/blockprint/server.rs @@ -0,0 +1,31 @@ +use crate::blockprint::database::{ + get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_blockprint( + Path(block_query): Path<String>, + Extension(pool): Extension<PgPool>, +) -> Result<Json<Option<WatchBlockprint>>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_blockprint_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn blockprint_routes() -> Router { + Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) +} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs new file mode 100644 index 00000000000..28c3184556c --- /dev/null +++ b/watch/src/blockprint/updater.rs @@ -0,0 +1,172 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; + +impl<T: EthSpec> UpdateHandler<T> { + /// Forward fills the `blockprint` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn fill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint is enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `blockprint` table. + let mut start_slot = if let Some(highest_filled_slot) = + database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) + { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead.
+ if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `blockprint` table. + warn!("Refusing to fill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Blockprint is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in either + // `blockprint` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + + Ok(()) + } + + /// Backfill the `blockprint` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + let max_blockprint_backfill = + self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `blockprint` table. + let end_slot = if let Some(lowest_filled_slot) = + database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) + { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `blockprint` table. + warn!("Refusing to backfill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Blockprint backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? 
{ + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Blockprint are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_blockprint_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { + start_slot = end_slot.saturating_sub(max_blockprint_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) + } + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` + // table. This is a critical failure. It usually means someone has manually tampered with the + // database tables and should not occur during normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + Ok(()) + } +} diff --git a/watch/src/cli.rs b/watch/src/cli.rs new file mode 100644 index 00000000000..a8e5f3716fe --- /dev/null +++ b/watch/src/cli.rs @@ -0,0 +1,55 @@ +use crate::{config::Config, logger, server, updater}; +use clap::{App, Arg}; +use tokio::sync::oneshot; + +pub const SERVE: &str = "serve"; +pub const RUN_UPDATER: &str = "run-updater"; +pub const CONFIG: &str = "config"; + +fn run_updater<'a, 'b>() -> App<'a, 'b> { + App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +} + +fn serve<'a, 'b>() -> App<'a, 'b> { + App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +} + +pub fn app<'a, 'b>() -> App<'a, 'b> { + App::new("beacon_watch_daemon") + .author("Sigma Prime ") + .setting(clap::AppSettings::ColoredHelp) + .arg( + Arg::with_name(CONFIG) + .long(CONFIG) + .value_name("PATH_TO_CONFIG") + .help("Path to configuration file") + .takes_value(true) + .global(true), + ) + .subcommand(run_updater()) + .subcommand(serve()) +} + +pub async fn run() -> Result<(), String> { + let matches = app().get_matches(); + + let config = match matches.value_of(CONFIG) { + Some(path) => Config::load_from_file(path.to_string())?, + None => Config::default(), + }; + + logger::init_logger(&config.log_level); + + match matches.subcommand() { + (RUN_UPDATER, Some(_)) => updater::run_updater(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), + (SERVE, Some(_)) => { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + server::serve(config, shutdown_rx) + .await + .map_err(|e| format!("Failure: {:?}", e)) + } + _ => Err("Unsupported subcommand. 
See --help".into()), + } +} diff --git a/watch/src/client.rs b/watch/src/client.rs new file mode 100644 index 00000000000..43aaccde343 --- /dev/null +++ b/watch/src/client.rs @@ -0,0 +1,178 @@ +use crate::block_packing::WatchBlockPacking; +use crate::block_rewards::WatchBlockRewards; +use crate::database::models::{ + WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, +}; +use crate::suboptimal_attestations::WatchAttestation; + +use eth2::types::BlockId; +use reqwest::Client; +use serde::de::DeserializeOwned; +use types::Hash256; +use url::Url; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchHttpClient { + pub client: Client, + pub server: Url, +} + +impl WatchHttpClient { + async fn get_opt(&self, url: Url) -> Result, Error> { + let response = self.client.get(url).send().await?; + + if response.status() == 404 { + Ok(None) + } else { + response + .error_for_status()? + .json() + .await + .map_err(Into::into) + } + } + + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&block_id.to_string())?; + + self.get_opt(url).await + } + + pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_lowest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_next_beacon_block( + &self, + parent: Hash256, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{parent:?}/"))? + .join("next")?; + + self.get_opt(url).await + } + + pub async fn get_validator_by_index( + &self, + index: i32, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join(&format!("{index}"))?; + + self.get_opt(url).await + } + + pub async fn get_proposer_info( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("proposer")?; + + self.get_opt(url).await + } + + pub async fn get_block_reward( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("rewards")?; + + self.get_opt(url).await + } + + pub async fn get_block_packing( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? 
+ .join("packing")?; + + self.get_opt(url).await + } + + pub async fn get_all_validators(&self) -> Result>, Error> { + let url = self.server.join("v1/")?.join("validators/")?.join("all")?; + + self.get_opt(url).await + } + + pub async fn get_attestations( + &self, + epoch: i32, + ) -> Result>, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join("all/")? + .join("attestation/")? + .join(&format!("{epoch}"))?; + + self.get_opt(url).await + } +} diff --git a/watch/src/config.rs b/watch/src/config.rs new file mode 100644 index 00000000000..4e61f9df9ca --- /dev/null +++ b/watch/src/config.rs @@ -0,0 +1,50 @@ +use crate::blockprint::Config as BlockprintConfig; +use crate::database::Config as DatabaseConfig; +use crate::server::Config as ServerConfig; +use crate::updater::Config as UpdaterConfig; + +use serde::{Deserialize, Serialize}; +use std::fs::File; + +pub const LOG_LEVEL: &str = "debug"; + +fn log_level() -> String { + LOG_LEVEL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub blockprint: BlockprintConfig, + #[serde(default)] + pub database: DatabaseConfig, + #[serde(default)] + pub server: ServerConfig, + #[serde(default)] + pub updater: UpdaterConfig, + /// The minimum severity for log messages. + #[serde(default = "log_level")] + pub log_level: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + blockprint: BlockprintConfig::default(), + database: DatabaseConfig::default(), + server: ServerConfig::default(), + updater: UpdaterConfig::default(), + log_level: log_level(), + } + } +} + +impl Config { + pub fn load_from_file(path_to_file: String) -> Result { + let file = + File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; + let config: Config = serde_yaml::from_reader(file) + .map_err(|e| format!("Error parsing config file: {:?}", e))?; + Ok(config) + } +} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs new file mode 100644 index 00000000000..b8cda0b2168 --- /dev/null +++ b/watch/src/database/compat.rs @@ -0,0 +1,49 @@ +//! Implementations of PostgreSQL compatibility traits. +use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; +use diesel::deserialize::{self, FromSql}; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{self, Output, ToSql}; +use diesel::sql_types::{Binary, Integer}; + +use std::convert::TryFrom; + +macro_rules! impl_to_from_sql_int { + ($type:ty) => { + impl ToSql for $type + where + i32: ToSql, + { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; + >::to_sql(&v, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Ok(Self::new(i32::from_sql(bytes)? as u64)) + } + } + }; +} + +macro_rules! 
diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs
new file mode 100644
index 00000000000..dc0c70832f4
--- /dev/null
+++ b/watch/src/database/config.rs
@@ -0,0 +1,74 @@
+use serde::{Deserialize, Serialize};
+
+pub const USER: &str = "postgres";
+pub const PASSWORD: &str = "postgres";
+pub const DBNAME: &str = "dev";
+pub const DEFAULT_DBNAME: &str = "postgres";
+pub const HOST: &str = "localhost";
+pub const fn port() -> u16 {
+    5432
+}
+pub const fn connect_timeout_millis() -> u64 {
+    2_000 // 2s
+}
+
+fn user() -> String {
+    USER.to_string()
+}
+
+fn password() -> String {
+    PASSWORD.to_string()
+}
+
+fn dbname() -> String {
+    DBNAME.to_string()
+}
+
+fn default_dbname() -> String {
+    DEFAULT_DBNAME.to_string()
+}
+
+fn host() -> String {
+    HOST.to_string()
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    #[serde(default = "user")]
+    pub user: String,
+    #[serde(default = "password")]
+    pub password: String,
+    #[serde(default = "dbname")]
+    pub dbname: String,
+    #[serde(default = "default_dbname")]
+    pub default_dbname: String,
+    #[serde(default = "host")]
+    pub host: String,
+    #[serde(default = "port")]
+    pub port: u16,
+    #[serde(default = "connect_timeout_millis")]
+    pub connect_timeout_millis: u64,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            user: user(),
+            password: password(),
+            dbname: dbname(),
+            default_dbname: default_dbname(),
+            host: host(),
+            port: port(),
+            connect_timeout_millis: connect_timeout_millis(),
+        }
+    }
+}
+
+impl Config {
+    pub fn build_database_url(&self) -> String {
+        format!(
+            "postgres://{}:{}@{}:{}/{}",
+            self.user, self.password, self.host, self.port, self.dbname
+        )
+    }
+}
diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs
new file mode 100644
index 00000000000..8c5088fa133
--- /dev/null
+++ b/watch/src/database/error.rs
@@ -0,0 +1,55 @@
+use bls::Error as BlsError;
+use diesel::result::{ConnectionError, Error as PgError};
+use eth2::SensitiveError;
+use r2d2::Error as PoolError;
+use std::fmt;
+use types::BeaconStateError;
+
+#[derive(Debug)]
+pub enum Error {
+    BeaconState(BeaconStateError),
+    Database(PgError),
+    DatabaseCorrupted,
+    InvalidSig(BlsError),
+    PostgresConnection(ConnectionError),
+    Pool(PoolError),
+    SensitiveUrl(SensitiveError),
+    InvalidRoot,
+    Other(String),
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl From<BeaconStateError> for Error {
+    fn from(e: BeaconStateError) -> Self {
+        Error::BeaconState(e)
+    }
+}
+
+impl From<ConnectionError> for Error {
+    fn from(e: ConnectionError) -> Self {
+        Error::PostgresConnection(e)
+    }
+}
+
+impl From<PgError> for Error {
+    fn from(e: PgError) -> Self {
+        Error::Database(e)
+    }
+}
+
+impl From<PoolError> for Error {
+    fn from(e: PoolError) -> Self {
+        Error::Pool(e)
+    }
+}
+
+impl From<BlsError> for Error {
+    fn from(e: BlsError) -> Self {
+        Error::InvalidSig(e)
+    }
+}
diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs
new file mode 100644
index 00000000000..b9a7a900a59
--- /dev/null
+++ b/watch/src/database/mod.rs
@@ -0,0 +1,782 @@
+mod config;
+mod error;
+
+pub mod compat;
+pub mod models;
+pub mod schema;
+pub mod utils;
+pub mod watch_types;
+
+use self::schema::{
+    active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations,
+    validators,
+};
+
+use diesel::dsl::max;
+use diesel::pg::PgConnection;
+use diesel::prelude::*;
+use diesel::r2d2::{Builder, ConnectionManager, Pool, PooledConnection};
+use diesel::upsert::excluded;
+use log::{debug, info};
+use std::collections::HashMap;
+use std::time::Instant;
+use types::{EthSpec, SignedBeaconBlock};
+
+pub use self::error::Error;
+pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator};
+pub use self::watch_types::{WatchHash, WatchPK, WatchSlot};
+
+pub use crate::block_rewards::{
+    get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards,
+    get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards,
+    WatchBlockRewards,
+};
+
+pub use crate::block_packing::{
+    get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
+    get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
+    WatchBlockPacking,
+};
+
+pub use crate::suboptimal_attestations::{
+    get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey,
+    get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations,
+    WatchAttestation, WatchSuboptimalAttestation,
+};
+
+pub use crate::blockprint::{
+    get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint,
+    get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint,
+    WatchBlockprint,
+};
+
+pub use config::Config;
+
+/// Batch inserts cannot exceed a certain size.
+/// See https://github.com/diesel-rs/diesel/issues/2414.
+/// For some reason, this seems to translate to 65535 / 5 (13107) records.
+pub const MAX_SIZE_BATCH_INSERT: usize = 13107;
+
+pub type PgPool = Pool<ConnectionManager<PgConnection>>;
+pub type PgConn = PooledConnection<ConnectionManager<PgConnection>>;
+
+/// Connect to a Postgresql database and build a connection pool.
+pub fn build_connection_pool(config: &Config) -> Result<PgPool, Error> {
+    let database_url = config.clone().build_database_url();
+    info!("Building connection pool at: {database_url}");
+    let pg = ConnectionManager::<PgConnection>::new(&database_url);
+    Builder::new().build(pg).map_err(Error::Pool)
+}
+
+/// Retrieve an idle connection from the pool.
+pub fn get_connection(pool: &PgPool) -> Result<PgConn, Error> {
+    pool.get().map_err(Error::Pool)
+}
+
+/// Insert the active config into the database. This is used to check if the connected beacon node
+/// is compatible with the database. These values will not change (except
+/// `current_blockprint_checkpoint`).
+pub fn insert_active_config(
+    conn: &mut PgConn,
+    new_config_name: String,
+    new_slots_per_epoch: u64,
+) -> Result<(), Error> {
+    use self::active_config::dsl::*;
+
+    diesel::insert_into(active_config)
+        .values(&vec![(
+            id.eq(1),
+            config_name.eq(new_config_name),
+            slots_per_epoch.eq(new_slots_per_epoch as i32),
+        )])
+        .on_conflict_do_nothing()
+        .execute(conn)?;
+
+    Ok(())
+}
+
+/// Get the active config from the database.
+pub fn get_active_config(conn: &mut PgConn) -> Result<Option<(String, i32)>, Error> {
+    use self::active_config::dsl::*;
+    Ok(active_config
+        .select((config_name, slots_per_epoch))
+        .filter(id.eq(1))
+        .first::<(String, i32)>(conn)
+        .optional()?)
+}
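// Editor's sketch of the intended call pattern (not part of this diff): the
// updater writes the active config once, and the server refuses to start until
// it exists.
//
//     let pool = build_connection_pool(&config)?;
//     let mut conn = get_connection(&pool)?;
//     match get_active_config(&mut conn)? {
//         Some((name, slots_per_epoch)) => { /* safe to serve this network */ }
//         None => { /* run the updater first */ }
//     }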
+
+///
+/// INSERT statements
+///
+
+/// Inserts a single row into the `canonical_slots` table.
+/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`.
+///
+/// On a conflict, it will do nothing, leaving the old value.
+pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> {
+    diesel::insert_into(canonical_slots::table)
+        .values(&new_slot)
+        .on_conflict_do_nothing()
+        .execute(conn)?;
+
+    debug!("Canonical slot inserted: {}", new_slot.slot);
+    Ok(())
+}
+
+pub fn insert_beacon_block<T: EthSpec>(
+    conn: &mut PgConn,
+    block: SignedBeaconBlock<T>,
+    root: WatchHash,
+) -> Result<(), Error> {
+    use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot};
+
+    let block_message = block.message();
+
+    // Pull out relevant values from the block.
+    let slot = WatchSlot::from_slot(block.slot());
+    let parent_root = WatchHash::from_hash(block.parent_root());
+    let proposer_index = block_message.proposer_index() as i32;
+    let graffiti = block_message.body().graffiti().as_utf8_lossy();
+    let attestation_count = block_message.body().attestations().len() as i32;
+
+    let full_payload = block_message.execution_payload().ok();
+
+    let transaction_count: Option<i32> = if let Some(bellatrix_payload) =
+        full_payload.and_then(|payload| payload.execution_payload_merge().ok())
+    {
+        Some(bellatrix_payload.transactions.len() as i32)
+    } else {
+        full_payload
+            .and_then(|payload| payload.execution_payload_capella().ok())
+            .map(|payload| payload.transactions.len() as i32)
+    };
+
+    let withdrawal_count: Option<i32> = full_payload
+        .and_then(|payload| payload.execution_payload_capella().ok())
+        .map(|payload| payload.withdrawals.len() as i32);
+
+    let block_to_add = WatchBeaconBlock {
+        slot,
+        root,
+        parent_root,
+        attestation_count,
+        transaction_count,
+        withdrawal_count,
+    };
+
+    let proposer_info_to_add = WatchProposerInfo {
+        slot,
+        proposer_index,
+        graffiti,
+    };
+
+    // Update the canonical slots table.
+    diesel::update(canonical_slots::table)
+        .set(beacon_block.eq(root))
+        .filter(canonical_slot.eq(slot))
+        // Do not overwrite the value if it already exists.
+        .filter(beacon_block.is_null())
+        .execute(conn)?;
+
+    diesel::insert_into(beacon_blocks::table)
+        .values(block_to_add)
+        .on_conflict_do_nothing()
+        .execute(conn)?;
+
+    diesel::insert_into(proposer_info::table)
+        .values(proposer_info_to_add)
+        .on_conflict_do_nothing()
+        .execute(conn)?;
+
+    debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}");
+    Ok(())
+}
+
+/// Insert a validator into the `validators` table
+///
+/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`.
+pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> {
+    use self::validators::dsl::*;
+    let new_index = validator.index;
+    let new_public_key = validator.public_key;
+
+    diesel::insert_into(validators)
+        .values(validator)
+        .on_conflict(index)
+        .do_update()
+        .set((
+            status.eq(excluded(status)),
+            activation_epoch.eq(excluded(activation_epoch)),
+            exit_epoch.eq(excluded(exit_epoch)),
+        ))
+        .execute(conn)?;
+
+    debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}");
+    Ok(())
+}
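// Editor's note: the ON CONFLICT clause above is roughly equivalent to the SQL
//
//     INSERT INTO validators ... ON CONFLICT (index) DO UPDATE
//         SET status = EXCLUDED.status,
//             activation_epoch = EXCLUDED.activation_epoch,
//             exit_epoch = EXCLUDED.exit_epoch;
//
// i.e. `index` and `public_key` are treated as immutable, while the mutable
// lifecycle fields are refreshed on every re-insert.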
+
+/// Insert a batch of values into the `validators` table.
+///
+/// On a conflict, it will do nothing.
+///
+/// Should not be used when updating validators.
+/// Validators should be updated through the `insert_validator` function which contains the correct
+/// `on_conflict` clauses.
+pub fn insert_batch_validators(
+    conn: &mut PgConn,
+    all_validators: Vec<WatchValidator>,
+) -> Result<(), Error> {
+    use self::validators::dsl::*;
+
+    let mut count = 0;
+
+    for chunk in all_validators.chunks(1000) {
+        count += diesel::insert_into(validators)
+            .values(chunk)
+            .on_conflict_do_nothing()
+            .execute(conn)?;
+    }
+
+    debug!("Validators inserted, count: {count}");
+    Ok(())
+}
+
+///
+/// SELECT statements
+///
+
+/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`.
+pub fn get_canonical_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .filter(slot.eq(slot_query))
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`.
+/// Only returns the non-skipped slot which matches `root`.
+pub fn get_canonical_slot_by_root(
+    conn: &mut PgConn,
+    root_query: WatchHash,
+) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .filter(root.eq(root_query))
+        .filter(skipped.eq(false))
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given
+/// `slot_query`.
+#[allow(dead_code)]
+pub fn get_root_at_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchHash>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .select(root)
+        .filter(slot.eq(slot_query))
+        .first::<WatchHash>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row of the `canonical_slots` table corresponding to the minimum value
+/// of `slot`.
+pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .order_by(slot.asc())
+        .limit(1)
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: lowest, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row of the `canonical_slots` table corresponding to the minimum value
+/// of `slot` and where `skipped == false`.
+pub fn get_lowest_non_skipped_canonical_slot(
+    conn: &mut PgConn,
+) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .filter(skipped.eq(false))
+        .order_by(slot.asc())
+        .limit(1)
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?}");
+    Ok(result)
+}
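// Editor's note: the lowest/highest helpers above and below compile to queries
// of the shape
//
//     SELECT * FROM canonical_slots [WHERE skipped = false]
//         ORDER BY slot ASC|DESC LIMIT 1;
//
// which stay cheap because `slot` is the table's primary key (see schema.rs
// later in this diff).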
+
+/// Selects the row of the `canonical_slots` table corresponding to the maximum value
+/// of `slot`.
+pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .order_by(slot.desc())
+        .limit(1)
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: highest, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row of the `canonical_slots` table corresponding to the maximum value
+/// of `slot` and where `skipped == false`.
+pub fn get_highest_non_skipped_canonical_slot(
+    conn: &mut PgConn,
+) -> Result<Option<WatchCanonicalSlot>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .filter(skipped.eq(false))
+        .order_by(slot.desc())
+        .limit(1)
+        .first::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects all rows of the `canonical_slots` table where `start_slot <= slot <= end_slot`.
+pub fn get_canonical_slots_by_range(
+    conn: &mut PgConn,
+    start_slot: WatchSlot,
+    end_slot: WatchSlot,
+) -> Result<Option<Vec<WatchCanonicalSlot>>, Error> {
+    use self::canonical_slots::dsl::*;
+    let timer = Instant::now();
+
+    let result = canonical_slots
+        .filter(slot.ge(start_slot))
+        .filter(slot.le(end_slot))
+        .load::<WatchCanonicalSlot>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!(
+        "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}",
+        start_slot.as_u64(),
+        end_slot.as_u64(),
+        time_taken
+    );
+    Ok(result)
+}
+
+/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null`
+/// and `skipped == false`.
+pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result<Vec<WatchHash>, Error> {
+    use self::canonical_slots::dsl::*;
+
+    let result = canonical_slots
+        .select(root)
+        .filter(beacon_block.is_null())
+        .filter(skipped.eq(false))
+        .order_by(slot.desc())
+        .load::<WatchHash>(conn)?;
+
+    Ok(result)
+}
+
+/// Selects the row from the `beacon_blocks` table where `slot` is minimum.
+pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .order_by(slot.asc())
+        .limit(1)
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Beacon block requested: lowest, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row from the `beacon_blocks` table where `slot` is maximum.
+pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .order_by(slot.desc())
+        .limit(1)
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Beacon block requested: highest, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`.
+pub fn get_beacon_block_by_root(
+    conn: &mut PgConn,
+    root_query: WatchHash,
+) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(root.eq(root_query))
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+    let time_taken = timer.elapsed();
+    debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`.
+pub fn get_beacon_block_by_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(slot.eq(slot_query))
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+    let time_taken = timer.elapsed();
+    debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`.
+/// This fetches the next block in the database.
+///
+/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain).
+pub fn get_beacon_block_with_parent(
+    conn: &mut PgConn,
+    parent: WatchHash,
+) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(parent_root.eq(parent))
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects all rows of the `beacon_blocks` table where `start_slot <= slot <= end_slot`.
+pub fn get_beacon_blocks_by_range(
+    conn: &mut PgConn,
+    start_slot: WatchSlot,
+    end_slot: WatchSlot,
+) -> Result<Option<Vec<WatchBeaconBlock>>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(slot.ge(start_slot))
+        .filter(slot.le(end_slot))
+        .load::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`.
+pub fn get_proposer_info_by_root(
+    conn: &mut PgConn,
+    root_query: WatchHash,
+) -> Result<Option<WatchProposerInfo>, Error> {
+    use self::beacon_blocks::dsl::{beacon_blocks, root};
+    use self::proposer_info::dsl::*;
+    let timer = Instant::now();
+
+    let join = beacon_blocks.inner_join(proposer_info);
+
+    let result = join
+        .select((slot, proposer_index, graffiti))
+        .filter(root.eq(root_query))
+        .first::<WatchProposerInfo>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`.
+pub fn get_proposer_info_by_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchProposerInfo>, Error> {
+    use self::proposer_info::dsl::*;
+    let timer = Instant::now();
+
+    let result = proposer_info
+        .filter(slot.eq(slot_query))
+        .first::<WatchProposerInfo>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
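// Editor's note: `get_all_validators_latest_proposer_info_at_slot` below issues
// roughly
//
//     SELECT proposer_index, MAX(slot) FROM proposer_info
//         WHERE slot <= $1 GROUP BY proposer_index;
//
// returning full rows instead would need a noticeably more expensive query, as
// the doc comment on that function notes.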
+
+/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`.
+#[allow(dead_code)]
+pub fn get_proposer_info_by_range(
+    conn: &mut PgConn,
+    start_slot: WatchSlot,
+    end_slot: WatchSlot,
+) -> Result<Option<Vec<WatchProposerInfo>>, Error> {
+    use self::proposer_info::dsl::*;
+    let timer = Instant::now();
+
+    let result = proposer_info
+        .filter(slot.ge(start_slot))
+        .filter(slot.le(end_slot))
+        .load::<WatchProposerInfo>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!(
+        "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}"
+    );
+    Ok(result)
+}
+
+pub fn get_validators_latest_proposer_info(
+    conn: &mut PgConn,
+    indices_query: Vec<i32>,
+) -> Result<HashMap<i32, WatchProposerInfo>, Error> {
+    use self::proposer_info::dsl::*;
+
+    let proposers = proposer_info
+        .filter(proposer_index.eq_any(indices_query))
+        .load::<WatchProposerInfo>(conn)?;
+
+    let mut result = HashMap::new();
+    for proposer in proposers {
+        result
+            .entry(proposer.proposer_index)
+            .or_insert_with(|| proposer.clone());
+        let entry = result
+            .get_mut(&proposer.proposer_index)
+            .ok_or_else(|| Error::Other("An internal error occurred".to_string()))?;
+        // Only the slot is refreshed here; the other fields keep the values from
+        // the first row seen for this proposer.
+        if proposer.slot > entry.slot {
+            entry.slot = proposer.slot
+        }
+    }
+
+    Ok(result)
+}
+
+/// Selects the max(`slot`) and `proposer_index` of each unique index in the
+/// `proposer_info` table and returns them formatted as a `HashMap`.
+/// Only returns rows which have `slot <= target_slot`.
+///
+/// Ideally, this would return the full row, but I have not found a way to do that without using
+/// a much more expensive SQL query.
+pub fn get_all_validators_latest_proposer_info_at_slot(
+    conn: &mut PgConn,
+    target_slot: WatchSlot,
+) -> Result<HashMap<WatchSlot, i32>, Error> {
+    use self::proposer_info::dsl::*;
+
+    let latest_proposals: Vec<(i32, Option<WatchSlot>)> = proposer_info
+        .group_by(proposer_index)
+        .select((proposer_index, max(slot)))
+        .filter(slot.le(target_slot))
+        .load::<(i32, Option<WatchSlot>)>(conn)?;
+
+    let mut result = HashMap::new();
+
+    for proposal in latest_proposals {
+        if let Some(latest_slot) = proposal.1 {
+            result.insert(latest_slot, proposal.0);
+        }
+    }
+
+    Ok(result)
+}
+
+/// Selects a single row from the `validators` table corresponding to a given
+/// `validator_index_query`.
+pub fn get_validator_by_index(
+    conn: &mut PgConn,
+    validator_index_query: i32,
+) -> Result<Option<WatchValidator>, Error> {
+    use self::validators::dsl::*;
+    let timer = Instant::now();
+
+    let result = validators
+        .filter(index.eq(validator_index_query))
+        .first::<WatchValidator>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row from the `validators` table corresponding to a given
+/// `public_key_query`.
+pub fn get_validator_by_public_key(
+    conn: &mut PgConn,
+    public_key_query: WatchPK,
+) -> Result<Option<WatchValidator>, Error> {
+    use self::validators::dsl::*;
+    let timer = Instant::now();
+
+    let result = validators
+        .filter(public_key.eq(public_key_query))
+        .first::<WatchValidator>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects all rows from the `validators` table which have an `index` contained in
+/// the `indices_query`.
+#[allow(dead_code)] +pub fn get_validators_by_indices( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let query_len = indices_query.len(); + let result = validators + .filter(index.eq_any(indices_query)) + .load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("{query_len} validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +// Selects all rows from the `validators` table. +pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators.load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("All validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +/// Counts the number of rows in the `validators` table. +#[allow(dead_code)] +pub fn count_validators(conn: &mut PgConn) -> Result { + use self::validators::dsl::*; + + validators.count().get_result(conn).map_err(Error::Database) +} + +/// Counts the number of rows in the `validators` table where +/// `activation_epoch <= target_slot.epoch()`. +pub fn count_validators_activated_before_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result { + use self::validators::dsl::*; + + let target_epoch = target_slot.epoch(slots_per_epoch); + + validators + .count() + .filter(activation_epoch.le(target_epoch.as_u64() as i32)) + .get_result(conn) + .map_err(Error::Database) +} + +/// +/// DELETE statements. +/// + +/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. +/// +/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from +/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, +/// `block_packing` and `proposer_info`. +pub fn delete_canonical_slots_above( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result { + use self::canonical_slots::dsl::*; + + let result = diesel::delete(canonical_slots) + .filter(slot.gt(slot_query)) + .execute(conn)?; + + debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); + Ok(result) +} + +/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater +/// than `epoch_start_slot_query`. 
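// Editor's note: because the migrations (not shown in this section of the diff)
// declare ON DELETE CASCADE against `canonical_slots`, the rollback above is a
// single statement, roughly:
//
//     DELETE FROM canonical_slots WHERE slot > $1;
//
// and Postgres removes the dependent `beacon_blocks`, `block_rewards`,
// `block_packing` and `proposer_info` rows itself. `suboptimal_attestations`
// is keyed by epoch start slot instead, hence the separate delete below.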
+pub fn delete_suboptimal_attestations_above( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result { + use self::suboptimal_attestations::dsl::*; + + let result = diesel::delete(suboptimal_attestations) + .filter(epoch_start_slot.gt(epoch_start_slot_query)) + .execute(conn)?; + + debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); + Ok(result) +} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs new file mode 100644 index 00000000000..f42444d6612 --- /dev/null +++ b/watch/src/database/models.rs @@ -0,0 +1,67 @@ +use crate::database::{ + schema::{beacon_blocks, canonical_slots, proposer_info, validators}, + watch_types::{WatchHash, WatchPK, WatchSlot}, +}; +use diesel::{Insertable, Queryable}; +use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; + +pub type WatchEpoch = i32; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = canonical_slots)] +pub struct WatchCanonicalSlot { + pub slot: WatchSlot, + pub root: WatchHash, + pub skipped: bool, + pub beacon_block: Option, +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = beacon_blocks)] +pub struct WatchBeaconBlock { + pub slot: WatchSlot, + pub root: WatchHash, + pub parent_root: WatchHash, + pub attestation_count: i32, + pub transaction_count: Option, + pub withdrawal_count: Option, +} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = validators)] +pub struct WatchValidator { + pub index: i32, + pub public_key: WatchPK, + pub status: String, + pub activation_epoch: Option, + pub exit_epoch: Option, +} + +// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. +impl Hash for WatchValidator { + fn hash(&self, state: &mut H) { + self.index.hash(state); + self.status.hash(state); + self.activation_epoch.hash(state); + self.exit_epoch.hash(state); + } +} + +impl PartialEq for WatchValidator { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + && self.status == other.status + && self.activation_epoch == other.activation_epoch + && self.exit_epoch == other.exit_epoch + } +} +impl Eq for WatchValidator {} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = proposer_info)] +pub struct WatchProposerInfo { + pub slot: WatchSlot, + pub proposer_index: i32, + pub graffiti: String, +} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs new file mode 100644 index 00000000000..32f22d506db --- /dev/null +++ b/watch/src/database/schema.rs @@ -0,0 +1,102 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + active_config (id) { + id -> Int4, + config_name -> Text, + slots_per_epoch -> Int4, + } +} + +diesel::table! { + beacon_blocks (slot) { + slot -> Int4, + root -> Bytea, + parent_root -> Bytea, + attestation_count -> Int4, + transaction_count -> Nullable, + withdrawal_count -> Nullable, + } +} + +diesel::table! { + block_packing (slot) { + slot -> Int4, + available -> Int4, + included -> Int4, + prior_skip_slots -> Int4, + } +} + +diesel::table! { + block_rewards (slot) { + slot -> Int4, + total -> Int4, + attestation_reward -> Int4, + sync_committee_reward -> Int4, + } +} + +diesel::table! { + blockprint (slot) { + slot -> Int4, + best_guess -> Text, + } +} + +diesel::table! 
{ + canonical_slots (slot) { + slot -> Int4, + root -> Bytea, + skipped -> Bool, + beacon_block -> Nullable, + } +} + +diesel::table! { + proposer_info (slot) { + slot -> Int4, + proposer_index -> Int4, + graffiti -> Text, + } +} + +diesel::table! { + suboptimal_attestations (epoch_start_slot, index) { + epoch_start_slot -> Int4, + index -> Int4, + source -> Bool, + head -> Bool, + target -> Bool, + } +} + +diesel::table! { + validators (index) { + index -> Int4, + public_key -> Bytea, + status -> Text, + activation_epoch -> Nullable, + exit_epoch -> Nullable, + } +} + +diesel::joinable!(block_packing -> beacon_blocks (slot)); +diesel::joinable!(block_rewards -> beacon_blocks (slot)); +diesel::joinable!(blockprint -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> validators (proposer_index)); +diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); +diesel::joinable!(suboptimal_attestations -> validators (index)); + +diesel::allow_tables_to_appear_in_same_query!( + active_config, + beacon_blocks, + block_packing, + block_rewards, + blockprint, + canonical_slots, + proposer_info, + suboptimal_attestations, + validators, +); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs new file mode 100644 index 00000000000..7e450f0cee7 --- /dev/null +++ b/watch/src/database/utils.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] +use crate::database::config::Config; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; + +/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. +/// +/// This is useful for creating or dropping databases, since these actions must be done by +/// logging into another database. +pub fn get_config_using_default_db(config: &Config) -> (Config, String) { + let mut config = config.clone(); + let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); + (config, new_dbname) +} + +/// Runs the set of migrations as detected in the local directory. +/// Equivalent to `diesel migration run`. +/// +/// Contains `unwrap`s so is only suitable for test code. 
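// Editor's sketch of the intended test-only use of `run_migrations` (assumes a
// disposable local database configured as in database/config.rs):
//
//     let config = Config::default();
//     let conn = run_migrations(&config);
//     // `begin_test_transaction` inside `run_migrations` means nothing done on
//     // `conn` is ever committed.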
+/// TODO(mac) refactor to return Result +pub fn run_migrations(config: &Config) -> PgConnection { + let database_url = config.clone().build_database_url(); + let mut conn = PgConnection::establish(&database_url).unwrap(); + let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); + conn.run_pending_migrations(migrations).unwrap(); + conn.begin_test_transaction().unwrap(); + conn +} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs new file mode 100644 index 00000000000..0b3ba2c304d --- /dev/null +++ b/watch/src/database/watch_types.rs @@ -0,0 +1,119 @@ +use crate::database::error::Error; +use diesel::{ + sql_types::{Binary, Integer}, + AsExpression, FromSqlRow, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +#[derive( + Clone, + Copy, + Debug, + AsExpression, + FromSqlRow, + Deserialize, + Serialize, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[diesel(sql_type = Integer)] +pub struct WatchSlot(Slot); + +impl fmt::Display for WatchSlot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl WatchSlot { + pub fn new(slot: u64) -> Self { + Self(Slot::new(slot)) + } + + pub fn from_slot(slot: Slot) -> Self { + Self(slot) + } + + pub fn as_slot(self) -> Slot { + self.0 + } + + pub fn as_u64(self) -> u64 { + self.0.as_u64() + } + + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + self.as_slot().epoch(slots_per_epoch) + } +} + +#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Deserialize, Serialize)] +#[diesel(sql_type = Binary)] +pub struct WatchHash(Hash256); + +impl fmt::Display for WatchHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchHash { + pub fn as_hash(&self) -> Hash256 { + self.0 + } + + pub fn from_hash(hash: Hash256) -> Self { + WatchHash(hash) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() == 32 { + Ok(WatchHash(Hash256::from_slice(src))) + } else { + Err(Error::InvalidRoot) + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] +#[diesel(sql_type = Binary)] +pub struct WatchPK(PublicKeyBytes); + +impl fmt::Display for WatchPK { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchPK { + pub fn as_bytes(&self) -> &[u8] { + self.0.as_serialized() + } + + pub fn from_bytes(src: &[u8]) -> Result { + Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) + } + + pub fn from_pubkey(key: PublicKeyBytes) -> Self { + WatchPK(key) + } +} + +impl FromStr for WatchPK { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(WatchPK( + PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, + )) + } +} diff --git a/watch/src/lib.rs b/watch/src/lib.rs new file mode 100644 index 00000000000..664c9451655 --- /dev/null +++ b/watch/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg(unix)] +pub mod block_packing; +pub mod block_rewards; +pub mod blockprint; +pub mod cli; +pub mod client; +pub mod config; +pub mod database; +pub mod logger; +pub mod server; +pub mod suboptimal_attestations; +pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs new file mode 100644 index 00000000000..49310b42aae --- /dev/null +++ b/watch/src/logger.rs @@ -0,0 +1,24 @@ +use env_logger::Builder; +use log::{info, LevelFilter}; +use 
std::process; + +pub fn init_logger(log_level: &str) { + let log_level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Unsupported log level"); + process::exit(1) + } + }; + + let mut builder = Builder::new(); + builder.filter(Some("watch"), log_level); + + builder.init(); + + info!("Logger initialized with log-level: {log_level}"); +} diff --git a/watch/src/main.rs b/watch/src/main.rs new file mode 100644 index 00000000000..f971747da42 --- /dev/null +++ b/watch/src/main.rs @@ -0,0 +1,41 @@ +#[cfg(unix)] +use std::process; + +#[cfg(unix)] +mod block_packing; +#[cfg(unix)] +mod block_rewards; +#[cfg(unix)] +mod blockprint; +#[cfg(unix)] +mod cli; +#[cfg(unix)] +mod config; +#[cfg(unix)] +mod database; +#[cfg(unix)] +mod logger; +#[cfg(unix)] +mod server; +#[cfg(unix)] +mod suboptimal_attestations; +#[cfg(unix)] +mod updater; + +#[cfg(unix)] +#[tokio::main] +async fn main() { + match cli::run().await { + Ok(()) => process::exit(0), + Err(e) => { + eprintln!("Command failed with: {}", e); + drop(e); + process::exit(1) + } + } +} + +#[cfg(windows)] +fn main() { + eprintln!("Windows is not supported. Exiting."); +} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs new file mode 100644 index 00000000000..a7d38e706f8 --- /dev/null +++ b/watch/src/server/config.rs @@ -0,0 +1,28 @@ +use serde::{Deserialize, Serialize}; +use std::net::IpAddr; + +pub const LISTEN_ADDR: &str = "127.0.0.1"; + +pub const fn listen_port() -> u16 { + 5059 +} +fn listen_addr() -> IpAddr { + LISTEN_ADDR.parse().expect("Server address is not valid") +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "listen_addr")] + pub listen_addr: IpAddr, + #[serde(default = "listen_port")] + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: listen_addr(), + listen_port: listen_port(), + } + } +} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs new file mode 100644 index 00000000000..d1542f78419 --- /dev/null +++ b/watch/src/server/error.rs @@ -0,0 +1,50 @@ +use crate::database::Error as DbError; +use axum::Error as AxumError; +use axum::{http::StatusCode, response::IntoResponse, Json}; +use hyper::Error as HyperError; +use serde_json::json; + +#[derive(Debug)] +pub enum Error { + Axum(AxumError), + Hyper(HyperError), + Database(DbError), + BadRequest, + NotFound, + Other(String), +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + let (status, error_message) = match self { + Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), + Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), + _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), + }; + (status, Json(json!({ "error": error_message }))).into_response() + } +} + +impl From for Error { + fn from(e: HyperError) -> Self { + Error::Hyper(e) + } +} + +impl From for Error { + fn from(e: AxumError) -> Self { + Error::Axum(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} diff --git a/watch/src/server/handler.rs b/watch/src/server/handler.rs new file mode 100644 index 00000000000..6777026867e --- /dev/null +++ b/watch/src/server/handler.rs @@ -0,0 +1,266 @@ +use crate::database::{ + self, Error 
as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK, + WatchProposerInfo, WatchSlot, WatchValidator, +}; +use crate::server::Error; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use eth2::types::BlockId; +use std::collections::HashMap; +use std::str::FromStr; + +pub async fn get_slot( + Path(slot): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_canonical_slot( + &mut conn, + WatchSlot::new(slot), + )?)) +} + +pub async fn get_slot_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slot_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slots_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_canonical_slots_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; + match block_id { + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_previous( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => { + if let Some(block) = + database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? 
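// Editor's note: "previous block by root" resolves in two steps: fetch the
// block for `root`, take its `parent_root` (the `.map` below), then fetch the
// parent by that root. An unknown root falls through to `Error::NotFound`,
// which `IntoResponse` renders as a 404.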
+ .map(|block| block.parent_root) + { + Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) + } else { + Err(Error::NotFound) + } + } + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_next( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot + 1_u64), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_blocks_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_beacon_blocks_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block_proposer( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validator( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_public_key( + &mut conn, pubkey, + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_index(&mut conn, index)?)) + } +} + +pub async fn get_all_validators( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_all_validators(&mut conn)?)) +} + +pub async fn get_validator_latest_proposal( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + let validator = + database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![validator.index], + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![index], + )?)) + } +} + +pub async fn 
get_client_breakdown( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + Ok(Json(database::get_validators_clients_at_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?)) + } else { + Err(Error::Database(DbError::Other( + "No slots found in database.".to_string(), + ))) + } +} + +pub async fn get_client_breakdown_percentages( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + let mut result = HashMap::new(); + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + let total = database::count_validators_activated_before_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?; + let clients = + database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; + for (client, number) in clients.iter() { + let percentage: f64 = *number as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs new file mode 100644 index 00000000000..09d5ec6aac5 --- /dev/null +++ b/watch/src/server/mod.rs @@ -0,0 +1,134 @@ +use crate::block_packing::block_packing_routes; +use crate::block_rewards::block_rewards_routes; +use crate::blockprint::blockprint_routes; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool}; +use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; +use axum::{ + handler::Handler, + http::{StatusCode, Uri}, + routing::get, + Extension, Json, Router, +}; +use eth2::types::ErrorMessage; +use log::info; +use std::future::Future; +use std::net::SocketAddr; +use tokio::sync::oneshot; + +pub use config::Config; +pub use error::Error; + +mod config; +mod error; +mod handler; + +pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { + let db = database::build_connection_pool(&config.database)?; + let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? + .ok_or_else(|| { + Error::Other( + "Database not found. Please run the updater prior to starting the server" + .to_string(), + ) + })?; + + let server = start_server(&config, slots_per_epoch as u64, db, async { + let _ = shutdown.await; + })?; + + server.await?; + + Ok(()) +} + +/// Creates a server that will serve requests using information from `config`. +/// +/// The server will create its own connection pool to serve connections to the database. +/// This is separate to the connection pool that is used for the `updater`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the address specified in the config and then return a +/// Future representing the actual server that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
+pub fn start_server( + config: &FullConfig, + slots_per_epoch: u64, + pool: PgPool, + shutdown: impl Future + Send + Sync + 'static, +) -> Result> + 'static, Error> { + let mut routes = Router::new() + .route("/v1/slots", get(handler::get_slots_by_range)) + .route("/v1/slots/:slot", get(handler::get_slot)) + .route("/v1/slots/lowest", get(handler::get_slot_lowest)) + .route("/v1/slots/highest", get(handler::get_slot_highest)) + .route("/v1/slots/:slot/block", get(handler::get_block)) + .route("/v1/blocks", get(handler::get_blocks_by_range)) + .route("/v1/blocks/:block", get(handler::get_block)) + .route("/v1/blocks/lowest", get(handler::get_block_lowest)) + .route("/v1/blocks/highest", get(handler::get_block_highest)) + .route( + "/v1/blocks/:block/previous", + get(handler::get_block_previous), + ) + .route("/v1/blocks/:block/next", get(handler::get_block_next)) + .route( + "/v1/blocks/:block/proposer", + get(handler::get_block_proposer), + ) + .route("/v1/validators/:validator", get(handler::get_validator)) + .route("/v1/validators/all", get(handler::get_all_validators)) + .route( + "/v1/validators/:validator/latest_proposal", + get(handler::get_validator_latest_proposal), + ) + .route("/v1/clients", get(handler::get_client_breakdown)) + .route( + "/v1/clients/percentages", + get(handler::get_client_breakdown_percentages), + ) + .merge(attestation_routes()) + .merge(blockprint_routes()) + .merge(block_packing_routes()) + .merge(block_rewards_routes()); + + if config.blockprint.enabled && config.updater.attestations { + routes = routes.merge(blockprint_attestation_routes()) + } + + let app = routes + .fallback(route_not_found.into_service()) + .layer(Extension(pool)) + .layer(Extension(slots_per_epoch)); + + let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); + + let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); + + let server = server.with_graceful_shutdown(async { + shutdown.await; + }); + + info!("HTTP server listening on {}", addr); + + Ok(server) +} + +// The default route indicating that no available routes matched the request. 
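// Editor's examples for the routes mounted above (assuming the default listen
// address 127.0.0.1:5059 from server/config.rs):
//
//     curl http://127.0.0.1:5059/v1/slots/highest
//     curl "http://127.0.0.1:5059/v1/slots?start_slot=100&end_slot=200"
//     curl http://127.0.0.1:5059/v1/blocks/100/proposer
//     curl http://127.0.0.1:5059/v1/clients/percentages
//
// Requests that match no route fall through to `route_not_found` below.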
+async fn route_not_found(uri: Uri) -> (StatusCode, Json) { + ( + StatusCode::METHOD_NOT_ALLOWED, + Json(ErrorMessage { + code: StatusCode::METHOD_NOT_ALLOWED.as_u16(), + message: format!("No route for {uri}"), + stacktraces: vec![], + }), + ) +} diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs new file mode 100644 index 00000000000..cb947d250a2 --- /dev/null +++ b/watch/src/suboptimal_attestations/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + schema::{suboptimal_attestations, validators}, + watch_types::{WatchPK, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +use types::Epoch; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct WatchAttestation { + pub index: i32, + pub epoch: Epoch, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchAttestation { + pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation { + WatchAttestation { + index, + epoch, + source: true, + head: true, + target: true, + } + } +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = suboptimal_attestations)] +pub struct WatchSuboptimalAttestation { + pub epoch_start_slot: WatchSlot, + pub index: i32, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchSuboptimalAttestation { + pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation { + WatchAttestation { + index: self.index, + epoch: self.epoch_start_slot.epoch(slots_per_epoch), + source: self.source, + head: self.head, + target: self.target, + } + } +} + +/// Insert a batch of values into the `suboptimal_attestations` table +/// +/// Since attestations technically occur per-slot but we only store them per-epoch (via its +/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a +/// 'suboptimal' attestation could now be 'optimal'. +/// +/// This is handled in the update code, where in the case of a re-org, the affected epoch is +/// deleted completely. +/// +/// On a conflict, it will do nothing. +pub fn insert_batch_suboptimal_attestations( + conn: &mut PgConn, + attestations: Vec, +) -> Result<(), Error> { + use self::suboptimal_attestations::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(suboptimal_attestations) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum. +pub fn get_lowest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.asc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum. +pub fn get_highest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.desc()) + .limit(1) + .first::(conn) + .optional()?) 
+} + +/// Selects a single row from the `suboptimal_attestations` table corresponding to a given +/// `index_query` and `epoch_query`. +pub fn get_attestation_by_index( + conn: &mut PgConn, + index_query: i32, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + let timer = Instant::now(); + + let result = suboptimal_attestations + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(index.eq(index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding +/// to a given `pubkey_query` and `epoch_query`. +#[allow(dead_code)] +pub fn get_attestation_by_pubkey( + conn: &mut PgConn, + pubkey_query: WatchPK, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + use self::validators::dsl::{public_key, validators}; + let timer = Instant::now(); + + let join = validators.inner_join(suboptimal_attestations); + + let result = join + .select((epoch_start_slot, index, source, head, target)) + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(public_key.eq(pubkey_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `source == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_source( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(source.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `head == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_head( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(head.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `target == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_target( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(target.eq(false)) + .load::(conn)?) +} + +/// Selects all rows from the `suboptimal_attestations` table for the given +/// `epoch_start_slot_query`. +pub fn get_all_suboptimal_attestations_for_epoch( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .load::(conn)?) 
+} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs new file mode 100644 index 00000000000..a94532e8ab2 --- /dev/null +++ b/watch/src/suboptimal_attestations/mod.rs @@ -0,0 +1,56 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use server::{attestation_routes, blockprint_attestation_routes}; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/attestation_performance`. +/// Formats the response into a vector of `WatchSuboptimalAttestation`. +/// +/// Any attestations with `source == true && head == true && target == true` are ignored. +pub async fn get_attestation_performances( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + let mut output = Vec::new(); + let result = bn + .get_lighthouse_analysis_attestation_performance( + start_epoch, + end_epoch, + "global".to_string(), + ) + .await?; + for index in result { + for epoch in index.epochs { + if epoch.1.active { + // Check if the attestation is suboptimal. + if !epoch.1.source || !epoch.1.head || !epoch.1.target { + output.push(WatchSuboptimalAttestation { + epoch_start_slot: WatchSlot::from_slot( + Epoch::new(epoch.0).start_slot(slots_per_epoch), + ), + index: index.index as i32, + source: epoch.1.source, + head: epoch.1.head, + target: epoch.1.target, + }) + } + } + } + } + Ok(output) +} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs new file mode 100644 index 00000000000..391db9a41b5 --- /dev/null +++ b/watch/src/suboptimal_attestations/server.rs @@ -0,0 +1,299 @@ +use crate::database::{ + get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, + get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, + WatchSlot, +}; + +use crate::blockprint::database::construct_validator_blockprints_at_slot; +use crate::server::Error; +use crate::suboptimal_attestations::database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, + get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, + WatchAttestation, WatchSuboptimalAttestation, +}; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; +use types::Epoch; + +// Will return Ok(None) if the epoch is not synced or if the validator does not exist. +// In the future it might be worth differentiating these events. +pub async fn get_validator_attestation( + Path((validator_query, epoch_query)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + let epoch = Epoch::new(epoch_query); + + // Ensure the database has synced the target epoch. + if get_canonical_slot( + &mut conn, + WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), + )? + .is_none() + { + // Epoch is not fully synced. 
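+        // Returning `Ok(None)` here (rather than an error) lets clients distinguish
+        // "not yet synced" from a malformed request.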
+ return Ok(Json(None)); + } + + let index = if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + get_validator_by_public_key(&mut conn, pubkey)? + .ok_or(Error::NotFound)? + .index + } else { + i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? + }; + let attestation = if let Some(suboptimal_attestation) = + get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? + { + Some(suboptimal_attestation.to_attestation(slots_per_epoch)) + } else { + // Attestation was not in database. Check if the validator was active. + match get_validator_by_index(&mut conn, index)? { + Some(validator) => { + if let Some(activation_epoch) = validator.activation_epoch { + if activation_epoch <= epoch.as_u64() as i32 { + if let Some(exit_epoch) = validator.exit_epoch { + if exit_epoch > epoch.as_u64() as i32 { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } else { + // Validator has exited. + None + } + } else { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } + } else { + // Validator is not yet active. + None + } + } else { + // Validator is not yet active. + None + } + } + None => return Err(Error::Other("Validator index does not exist".to_string())), + } + }; + Ok(Json(attestation)) +} + +pub async fn get_all_validators_attestations( + Path(epoch): Path, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + Ok(Json(get_all_suboptimal_attestations_for_epoch( + &mut conn, + epoch_start_slot, + )?)) +} + +pub async fn get_validators_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + match vote.to_lowercase().as_str() { + "source" => Ok(Json(get_validators_missed_source( + &mut conn, + epoch_start_slot, + )?)), + "head" => Ok(Json(get_validators_missed_head( + &mut conn, + epoch_start_slot, + )?)), + "target" => Ok(Json(get_validators_missed_target( + &mut conn, + epoch_start_slot, + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validators_missed_vote_graffiti( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? + .values() + .map(|info| info.graffiti.clone()) + .collect::>(); + + let mut result = HashMap::new(); + for graffiti in graffitis { + if !result.contains_key(&graffiti) { + result.insert(graffiti.clone(), 0); + } + *result + .get_mut(&graffiti) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1;
+    }
+
+    Ok(Json(result))
+}
+
+pub fn attestation_routes() -> Router {
+    Router::new()
+        .route(
+            "/v1/validators/:validator/attestation/:epoch",
+            get(get_validator_attestation),
+        )
+        .route(
+            "/v1/validators/all/attestation/:epoch",
+            get(get_all_validators_attestations),
+        )
+        .route(
+            "/v1/validators/missed/:vote/:epoch",
+            get(get_validators_missed_vote),
+        )
+        .route(
+            "/v1/validators/missed/:vote/:epoch/graffiti",
+            get(get_validators_missed_vote_graffiti),
+        )
+}
+
+/// The functions below depend on Blockprint; if Blockprint is disabled, these endpoints are
+/// disabled too.
+pub async fn get_clients_missed_vote(
+    Path((vote, epoch)): Path<(String, u64)>,
+    Extension(pool): Extension<PgPool>,
+    Extension(slots_per_epoch): Extension<u64>,
+) -> Result<Json<HashMap<String, u64>>, Error> {
+    let mut conn = get_connection(&pool).map_err(Error::Database)?;
+
+    let Json(indices) = get_validators_missed_vote(
+        Path((vote, epoch)),
+        Extension(pool),
+        Extension(slots_per_epoch),
+    )
+    .await?;
+
+    // All validators which missed the vote.
+    let indices_map = indices.into_iter().collect::<HashSet<i32>>();
+
+    let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
+
+    // All validators.
+    let client_map =
+        construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?;
+
+    let mut result = HashMap::new();
+
+    for index in indices_map {
+        if let Some(print) = client_map.get(&index) {
+            if !result.contains_key(print) {
+                result.insert(print.clone(), 0);
+            }
+            *result
+                .get_mut(print)
+                .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1;
+        }
+    }
+
+    Ok(Json(result))
+}
+
+pub async fn get_clients_missed_vote_percentages(
+    Path((vote, epoch)): Path<(String, u64)>,
+    Extension(pool): Extension<PgPool>,
+    Extension(slots_per_epoch): Extension<u64>,
+) -> Result<Json<HashMap<String, f64>>, Error> {
+    let Json(clients_counts) = get_clients_missed_vote(
+        Path((vote, epoch)),
+        Extension(pool.clone()),
+        Extension(slots_per_epoch),
+    )
+    .await?;
+
+    let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
+
+    let mut conn = get_connection(&pool)?;
+    let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?;
+
+    let mut result = HashMap::new();
+    for (client, count) in clients_counts.iter() {
+        let client_total: f64 = *totals
+            .get(client)
+            .ok_or_else(|| Error::Other("Client type mismatch".to_string()))?
+            as f64;
+        // `client_total` should never be `0`, but if it is, return `0` instead of `inf`.
+        if client_total == 0.0 {
+            result.insert(client.to_string(), 0.0);
+        } else {
+            let percentage: f64 = *count as f64 / client_total * 100.0;
+            result.insert(client.to_string(), percentage);
+        }
+    }
+
+    Ok(Json(result))
+}
+
+pub async fn get_clients_missed_vote_percentages_relative(
+    Path((vote, epoch)): Path<(String, u64)>,
+    Extension(pool): Extension<PgPool>,
+    Extension(slots_per_epoch): Extension<u64>,
+) -> Result<Json<HashMap<String, f64>>, Error> {
+    let Json(clients_counts) = get_clients_missed_vote(
+        Path((vote, epoch)),
+        Extension(pool),
+        Extension(slots_per_epoch),
+    )
+    .await?;
+
+    let mut total: u64 = 0;
+    for (_, count) in clients_counts.iter() {
+        total += *count
+    }
+
+    let mut result = HashMap::new();
+    for (client, count) in clients_counts.iter() {
+        // `total` should never be `0`, but if it is, return `0` instead of `inf`.
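+        // (These relative percentages sum to roughly 100 across all clients, unlike the
+        // per-client denominators used by `get_clients_missed_vote_percentages` above.)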
+        if total == 0 {
+            result.insert(client.to_string(), 0.0);
+        } else {
+            let percentage: f64 = *count as f64 / total as f64 * 100.0;
+            result.insert(client.to_string(), percentage);
+        }
+    }
+
+    Ok(Json(result))
+}
+
+pub fn blockprint_attestation_routes() -> Router {
+    Router::new()
+        .route(
+            "/v1/clients/missed/:vote/:epoch",
+            get(get_clients_missed_vote),
+        )
+        .route(
+            "/v1/clients/missed/:vote/:epoch/percentages",
+            get(get_clients_missed_vote_percentages),
+        )
+        .route(
+            "/v1/clients/missed/:vote/:epoch/percentages/relative",
+            get(get_clients_missed_vote_percentages_relative),
+        )
+}
diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs
new file mode 100644
index 00000000000..aeabff2035c
--- /dev/null
+++ b/watch/src/suboptimal_attestations/updater.rs
@@ -0,0 +1,236 @@
+use crate::database::{self, Error as DbError};
+use crate::updater::{Error, UpdateHandler};
+
+use crate::suboptimal_attestations::get_attestation_performances;
+
+use eth2::types::EthSpec;
+use log::{debug, error, warn};
+
+const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50;
+
+impl<T: EthSpec> UpdateHandler<T> {
+    /// Forward fills the `suboptimal_attestations` table starting from the entry with the
+    /// highest slot.
+    ///
+    /// It constructs a request to the `attestation_performance` API endpoint with:
+    /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot)
+    /// `end_epoch` -> epoch of highest canonical slot
+    ///
+    /// It will resync the latest epoch if it is not fully filled but will not overwrite existing
+    /// values unless there is a re-org.
+    /// That is, `if highest_filled_slot % slots_per_epoch != 31`.
+    ///
+    /// In the event the most recent epoch has no suboptimal attestations, it will attempt to
+    /// resync that epoch. The odds of this occurring on mainnet are vanishingly small so it is
+    /// not accounted for.
+    ///
+    /// The request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`.
+    pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+
+        let highest_filled_slot_opt = if self.config.attestations {
+            database::get_highest_attestation(&mut conn)?
+                .map(|attestation| attestation.epoch_start_slot.as_slot())
+        } else {
+            return Err(Error::NotEnabled("attestations".to_string()));
+        };
+
+        let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt {
+            if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1)
+            {
+                // The whole epoch is filled so we can begin syncing the next one.
+                highest_filled_slot.epoch(self.slots_per_epoch) + 1
+            } else {
+                // The epoch is only partially synced. Try to sync it fully.
+                highest_filled_slot.epoch(self.slots_per_epoch)
+            }
+        } else {
+            // No rows present in the `suboptimal_attestations` table. Use `canonical_slots`
+            // instead.
+            if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? {
+                lowest_canonical_slot
+                    .slot
+                    .as_slot()
+                    .epoch(self.slots_per_epoch)
+            } else {
+                // There are no slots in the database, do not fill the `suboptimal_attestations`
+                // table.
+ warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); + return Ok(()); + } + }; + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); + + // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations + // which are more than 1 epoch old. + // We assume that `highest_canonical_slot` is near the head of the chain. + end_epoch = end_epoch.saturating_sub(2_u64); + + // If end_epoch == 0 then the chain just started so we need to wait until + // `current_epoch >= 2`. + if end_epoch == 0 { + debug!("Chain just begun, refusing to sync attestations"); + return Ok(()); + } + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert attestations with corresponding `canonical_slot`s. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest canonical slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slots` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> epoch of the lowest `canonical_slot`. + /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest + /// canonical slot) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// + /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to + /// resync the epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_attestation_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `suboptimal_attestations` table. + let lowest_filled_slot_opt = if self.config.attestations { + database::get_lowest_attestation(&mut conn)? 
+ .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot % self.slots_per_epoch == 0 { + lowest_filled_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + // Subtract 2 since `end_epoch` must be less than the current epoch - 1. + // We assume that `highest_canonical_slot` is near the head of the chain. + highest_canonical_slot + .epoch(self.slots_per_epoch) + .saturating_sub(2_u64) + } else { + // There are no slots in the database, do not backfill the + // `suboptimal_attestations` table. + warn!("Refusing to backfill attestations as there are no slots in the database"); + return Ok(()); + } + }; + + if end_epoch == 0 { + debug!("Attestations backfill is complete"); + return Ok(()); + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the base of the database"); + return Ok(()); + } + + // Ensure the request range does not exceed `max_attestation_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { + start_epoch = end_epoch.saturating_sub(max_attestation_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) + } + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slot` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs new file mode 100644 index 00000000000..0179be73db6 --- /dev/null +++ b/watch/src/updater/config.rs @@ -0,0 +1,65 @@ +use serde::{Deserialize, Serialize}; + +pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; + +pub const fn max_backfill_size_epochs() -> u64 { + 2 +} +pub const fn backfill_stop_epoch() -> u64 { + 0 +} +pub const fn attestations() -> bool { + true +} +pub const fn proposer_info() -> bool { + true +} +pub const fn block_rewards() -> bool { + true +} +pub const fn block_packing() -> bool { + true +} + +fn beacon_node_url() -> String { + BEACON_NODE_URL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// The URL of the beacon you wish to sync from. + #[serde(default = "beacon_node_url")] + pub beacon_node_url: String, + /// The maximum size each backfill iteration will allow per request (in epochs). + #[serde(default = "max_backfill_size_epochs")] + pub max_backfill_size_epochs: u64, + /// The epoch at which to never backfill past. + #[serde(default = "backfill_stop_epoch")] + pub backfill_stop_epoch: u64, + /// Whether to sync the suboptimal_attestations table. + #[serde(default = "attestations")] + pub attestations: bool, + /// Whether to sync the proposer_info table. + #[serde(default = "proposer_info")] + pub proposer_info: bool, + /// Whether to sync the block_rewards table. + #[serde(default = "block_rewards")] + pub block_rewards: bool, + /// Whether to sync the block_packing table. + #[serde(default = "block_packing")] + pub block_packing: bool, +} + +impl Default for Config { + fn default() -> Self { + Self { + beacon_node_url: beacon_node_url(), + max_backfill_size_epochs: max_backfill_size_epochs(), + backfill_stop_epoch: backfill_stop_epoch(), + attestations: attestations(), + proposer_info: proposer_info(), + block_rewards: block_rewards(), + block_packing: block_packing(), + } + } +} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs new file mode 100644 index 00000000000..74091c8f217 --- /dev/null +++ b/watch/src/updater/error.rs @@ -0,0 +1,56 @@ +use crate::blockprint::Error as BlockprintError; +use crate::database::Error as DbError; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{Error as Eth2Error, SensitiveError}; +use std::fmt; + +#[derive(Debug)] +pub enum Error { + BeaconChain(BeaconChainError), + Eth2(Eth2Error), + SensitiveUrl(SensitiveError), + Database(DbError), + Blockprint(BlockprintError), + UnableToGetRemoteHead, + BeaconNodeSyncing, + NotEnabled(String), + NoValidatorsFound, + BeaconNodeNotCompatible(String), + InvalidConfig(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(e) + } +} + +impl From for Error { + fn from(e: Eth2Error) -> Self { + Error::Eth2(e) + } +} + +impl From for Error { + fn from(e: SensitiveError) -> Self { + Error::SensitiveUrl(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: BlockprintError) -> Self { + Error::Blockprint(e) + } +} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs new file mode 100644 index 00000000000..1e1662bf749 --- /dev/null +++ b/watch/src/updater/handler.rs @@ -0,0 
+1,471 @@ +use crate::blockprint::WatchBlockprintClient; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; +use crate::updater::{Config, Error, WatchSpec}; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{ + types::{BlockId, SyncingData}, + BeaconNodeHttpClient, SensitiveUrl, +}; +use log::{debug, error, info, warn}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +use crate::updater::{get_beacon_block, get_header, get_validators}; + +const MAX_EXPECTED_REORG_LENGTH: u64 = 32; + +/// Ensure the existing database is valid for this run. +pub async fn ensure_valid_database( + spec: &WatchSpec, + pool: &mut PgPool, +) -> Result<(), Error> { + let mut conn = database::get_connection(pool)?; + + let bn_slots_per_epoch = spec.slots_per_epoch(); + let bn_config_name = spec.network.clone(); + + if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { + if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { + Err(Error::InvalidConfig( + "The config stored in the database does not match the beacon node.".to_string(), + )) + } else { + // Configs match. + Ok(()) + } + } else { + // No config exists in the DB. + database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; + Ok(()) + } +} + +pub struct UpdateHandler { + pub pool: PgPool, + pub bn: BeaconNodeHttpClient, + pub blockprint: Option, + pub config: Config, + pub slots_per_epoch: u64, + pub spec: WatchSpec, +} + +impl UpdateHandler { + pub async fn new( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, + ) -> Result, Error> { + let blockprint = if config.blockprint.enabled { + if let Some(server) = config.blockprint.url { + let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; + Some(WatchBlockprintClient { + client: reqwest::Client::new(), + server: blockprint_url, + username: config.blockprint.username, + password: config.blockprint.password, + }) + } else { + return Err(Error::NotEnabled( + "blockprint was enabled but url was not set".to_string(), + )); + } + } else { + None + }; + + let mut pool = database::build_connection_pool(&config.database)?; + + ensure_valid_database(&spec, &mut pool).await?; + + Ok(Self { + pool, + bn, + blockprint, + config: config.updater, + slots_per_epoch: spec.slots_per_epoch(), + spec, + }) + } + + /// Gets the syncing status of the connected beacon node. + pub async fn get_bn_syncing_status(&mut self) -> Result { + Ok(self.bn.get_node_syncing().await?.data) + } + + /// Gets a list of block roots from the database which do not yet contain a corresponding + /// entry in the `beacon_blocks` table and inserts them. + pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let roots = database::get_unknown_canonical_blocks(&mut conn)?; + for root in roots { + let block_opt: Option> = + get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; + if let Some(block) = block_opt { + database::insert_beacon_block(&mut conn, block, root)?; + } + } + + Ok(()) + } + + /// Performs a head update with the following steps: + /// 1. Pull the latest header from the beacon node and the latest canonical slot from the + /// database. + /// 2. Loop back through the beacon node and database to find the first matching slot -> root + /// pair. 
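+    ///    (i.e. the most recent ancestor of the head which the database already knows about).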
+    /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is
+    /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover.
+    /// 4. Remove any invalid slots from the database.
+    /// 5. Sync all blocks between the first valid block of the database and the head of the
+    /// beacon chain.
+    ///
+    /// In the event there are no slots present in the database, it will sync from the head block
+    /// back to the first slot of the epoch.
+    /// This will ensure backfills are always done in full epochs (which helps keep certain
+    /// syncing tasks efficient).
+    pub async fn perform_head_update(&mut self) -> Result<(), Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+        // Load the head from the beacon node.
+        let bn_header = get_header(&self.bn, BlockId::Head)
+            .await?
+            .ok_or(Error::UnableToGetRemoteHead)?;
+        let header_root = bn_header.canonical_root();
+
+        if let Some(latest_matching_canonical_slot) =
+            self.get_first_matching_block(bn_header.clone()).await?
+        {
+            // Check for reorgs.
+            let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?;
+
+            // Remove all slots above `latest_db_slot` from the database.
+            let result = database::delete_canonical_slots_above(
+                &mut conn,
+                WatchSlot::from_slot(latest_db_slot),
+            )?;
+            info!("{result} old records removed during head update");
+
+            if result > 0 {
+                // If slots were removed, we need to resync the suboptimal_attestations table for
+                // the epoch since they will have changed and cannot be fixed by a simple update.
+                let epoch = latest_db_slot
+                    .epoch(self.slots_per_epoch)
+                    .saturating_sub(1_u64);
+                debug!("Preparing to resync attestations above epoch {epoch}");
+                database::delete_suboptimal_attestations_above(
+                    &mut conn,
+                    WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)),
+                )?;
+            }
+
+            // Since we are syncing backwards, `start_slot` > `end_slot`.
+            let start_slot = bn_header.slot;
+            let end_slot = latest_db_slot + 1;
+            self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot)
+                .await?;
+            info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}");
+
+            // Attempt to sync new blocks with blockprint.
+            //self.sync_blockprint_until(start_slot).await?;
+        } else {
+            // There are no matching parent blocks. Sync from the head block back until the first
+            // block of the epoch.
+            let start_slot = bn_header.slot;
+            let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch);
+            self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot)
+                .await?;
+            info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}");
+        }
+
+        Ok(())
+    }
+
+    /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root`
+    /// of the block header as reported by the beacon node.
+    ///
+    /// Any blocks above this value are not canonical according to the beacon node.
+    ///
+    /// Note: In the event that there are skip slots above the slot returned by the function,
+    /// they will not be returned, so they may be pruned or re-synced by other code despite being
+    /// canonical.
+    pub async fn get_first_matching_block(
+        &mut self,
+        mut bn_header: BeaconBlockHeader,
+    ) -> Result<Option<WatchCanonicalSlot>, Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+
+        // Load the latest non-skipped canonical slot from the database.
+        if let Some(db_canonical_slot) =
+            database::get_highest_non_skipped_canonical_slot(&mut conn)?
+        {
+            // Check if the header or parent root matches the entry in the database.
+            if bn_header.parent_root == db_canonical_slot.root.as_hash()
+                || bn_header.canonical_root() == db_canonical_slot.root.as_hash()
+            {
+                Ok(Some(db_canonical_slot))
+            } else {
+                // The header is not the child of the highest entry in the database.
+                // From here we need to iterate backwards through the database until we find
+                // a slot -> root pair that matches the beacon node.
+                loop {
+                    // Store the working `parent_root`.
+                    let parent_root = bn_header.parent_root;
+
+                    // Try the next header.
+                    let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?;
+                    if let Some(header) = next_header {
+                        bn_header = header.clone();
+                        if let Some(db_canonical_slot) = database::get_canonical_slot_by_root(
+                            &mut conn,
+                            WatchHash::from_hash(header.parent_root),
+                        )? {
+                            // Check if the entry in the database matches the parent of
+                            // the header.
+                            if header.parent_root == db_canonical_slot.root.as_hash() {
+                                return Ok(Some(db_canonical_slot));
+                            } else {
+                                // Move on to the next header.
+                                continue;
+                            }
+                        } else {
+                            // The database does not have the referenced root. Try the next
+                            // header.
+                            continue;
+                        }
+                    } else {
+                        // If we get this error it means that the `parent_root` of the header
+                        // did not reference a canonical block.
+                        return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock(
+                            parent_root,
+                        )));
+                    }
+                }
+            }
+        } else {
+            // There are no non-skipped blocks present in the database.
+            Ok(None)
+        }
+    }
+
+    /// Given the latest slot in the database which matches a root in the beacon node,
+    /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the
+    /// tip of the database is consistent with the beacon node (in case any re-orgs have
+    /// occurred).
+    ///
+    /// Returns the slot before the oldest canonical_slot which has an invalid child.
+    pub async fn check_for_reorg(
+        &mut self,
+        latest_canonical_slot: WatchCanonicalSlot,
+    ) -> Result<Slot, Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+
+        let end_slot = latest_canonical_slot.slot.as_u64();
+        let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH);
+
+        for i in start_slot..end_slot {
+            let slot = Slot::new(i);
+            let db_canonical_slot_opt =
+                database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?;
+            if let Some(db_canonical_slot) = db_canonical_slot_opt {
+                let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?;
+                if let Some(header) = header_opt {
+                    if header.canonical_root() == db_canonical_slot.root.as_hash() {
+                        // The roots match (or are both skip slots).
+                        continue;
+                    } else {
+                        // The block roots do not match. We need to re-sync from here.
+                        warn!("Block {slot} does not match the beacon node. Resyncing");
+                        return Ok(slot.saturating_sub(1_u64));
+                    }
+                } else if !db_canonical_slot.skipped {
+                    // The block exists in the database, but does not exist on the beacon node.
+                    // We need to re-sync from here.
+                    warn!("Block {slot} does not exist on the beacon node. Resyncing");
+                    return Ok(slot.saturating_sub(1_u64));
+                }
+            } else {
+                // This slot does not exist in the database.
+                let lowest_slot = database::get_lowest_canonical_slot(&mut conn)?
+                    .map(|canonical_slot| canonical_slot.slot.as_slot());
+                if lowest_slot > Some(slot) {
+                    // The database has not back-filled this slot yet, so skip it.
+                    continue;
+                } else {
+                    // The database does not contain this block, but has back-filled past it.
+                    // We need to resync from here.
+                    warn!("Slot {slot} missing from database.
Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } + } + + // The database is consistent with the beacon node, so return the head of the database. + Ok(latest_canonical_slot.slot.as_slot()) + } + + /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. + /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. + /// + /// Skip slots set `root` to the root of the previous non-skipped slot and also sets + /// `skipped == true`. + /// + /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite + /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that + /// needs to be resynced, must first be deleted from the database. + pub async fn reverse_fill_canonical_slots( + &mut self, + mut header: BeaconBlockHeader, + mut header_root: Hash256, + mut skipped: bool, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + let mut count = 0; + + let mut conn = database::get_connection(&self.pool)?; + + // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). + for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { + // Insert header. + database::insert_canonical_slot( + &mut conn, + WatchCanonicalSlot { + slot: WatchSlot::new(slot), + root: WatchHash::from_hash(header_root), + skipped, + beacon_block: None, + }, + )?; + count += 1; + + // Load the next header: + // We must use BlockId::Slot since we want to include skip slots. + header = if let Some(new_header) = get_header( + &self.bn, + BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), + ) + .await? + { + header_root = new_header.canonical_root(); + skipped = false; + new_header + } else { + if header.slot == 0 { + info!("Reverse fill exhausted at slot 0"); + break; + } + // Slot was skipped, so use the parent_root (most recent non-skipped block). + skipped = true; + header_root = header.parent_root; + header + }; + } + + Ok(count) + } + + /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and + /// stopping after `max_backfill_size_epochs` epochs. + pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; + // Check to see if we have finished backfilling. + if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { + if lowest_slot.slot.as_slot() == backfill_stop_slot { + debug!("Backfill sync complete, all slots filled"); + return Ok(()); + } + } + + let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + if let Some(lowest_non_skipped_canonical_slot) = + database::get_lowest_non_skipped_canonical_slot(&mut conn)? + { + // Set `start_slot` equal to the lowest non-skipped slot in the database. + // While this will attempt to resync some parts of the bottom of the chain, it reduces + // complexity when dealing with skip slots. 
+            let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot();
+            let mut end_slot = lowest_non_skipped_canonical_slot
+                .slot
+                .as_slot()
+                .saturating_sub(backfill_slot_count);
+
+            // Ensure `end_slot` does not go below `backfill_stop_epoch`.
+            if end_slot <= backfill_stop_slot {
+                end_slot = Slot::new(backfill_stop_slot);
+            }
+
+            let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?;
+
+            if let Some(header) = header_opt {
+                let header_root = header.canonical_root();
+                let count = self
+                    .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot)
+                    .await?;
+
+                info!("Backfill completed to slot: {end_slot}, records added: {count}");
+            } else {
+                // The lowest slot of the database is inconsistent with the beacon node.
+                // Currently we have no way to recover from this. The entire database will need
+                // to be re-synced.
+                error!(
+                    "Database is inconsistent with the beacon node. \
+                    Please ensure your beacon node is set to the right network, \
+                    otherwise you may need to resync"
+                );
+            }
+        } else {
+            // There are no blocks in the database. Forward sync needs to happen first.
+            info!("Backfill was not performed since there are no blocks in the database");
+            return Ok(());
+        };
+
+        Ok(())
+    }
+
+    // Attempt to update the validator set.
+    // This downloads the latest validator set from the beacon node and pulls the known validator
+    // set from the database.
+    // We then take any new or updated validators and insert them into the database (overwriting
+    // existing validators).
+    //
+    // In the event there are no validators in the database, it will initialize the validator
+    // set.
+    pub async fn update_validator_set(&mut self) -> Result<(), Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+
+        let current_validators = database::get_all_validators(&mut conn)?;
+
+        if !current_validators.is_empty() {
+            let old_validators = HashSet::from_iter(current_validators);
+
+            // Pull the new validator set from the beacon node.
+            let new_validators = get_validators(&self.bn).await?;
+
+            // The difference should only contain validators that have either a new `exit_epoch`
+            // (implying an exit) or a new `index` (implying a validator activation).
+            let val_diff = new_validators.difference(&old_validators);
+
+            for diff in val_diff {
+                database::insert_validator(&mut conn, diff.clone())?;
+            }
+        } else {
+            info!("No validators present in database. Initializing the validator set");
+            self.initialize_validator_set().await?;
+        }
+
+        Ok(())
+    }
+
+    // Initialize the validator set by downloading it from the beacon node, inserting blockprint
+    // data (if required) and writing it to the database.
+    pub async fn initialize_validator_set(&mut self) -> Result<(), Error> {
+        let mut conn = database::get_connection(&self.pool)?;
+
+        // Pull all validators from the beacon node.
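+        // `get_validators` returns a `HashSet`, so collect it into a `Vec` for the batch
+        // insert below.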
+ let validators = Vec::from_iter(get_validators(&self.bn).await?); + + database::insert_batch_validators(&mut conn, validators)?; + + Ok(()) + } +} diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs new file mode 100644 index 00000000000..1fbb0107aef --- /dev/null +++ b/watch/src/updater/mod.rs @@ -0,0 +1,234 @@ +use crate::config::Config as FullConfig; +use crate::database::{WatchPK, WatchValidator}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use log::{debug, error, info}; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; +use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock}; + +pub use config::Config; +pub use error::Error; +pub use handler::UpdateHandler; + +mod config; +pub mod error; +pub mod handler; + +const FAR_FUTURE_EPOCH: u64 = u64::MAX; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +const MAINNET: &str = "mainnet"; +const GNOSIS: &str = "gnosis"; + +pub struct WatchSpec { + network: String, + spec: PhantomData, +} + +impl WatchSpec { + fn slots_per_epoch(&self) -> u64 { + T::slots_per_epoch() + } +} + +impl WatchSpec { + pub fn mainnet(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +impl WatchSpec { + fn gnosis(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +pub async fn run_updater(config: FullConfig) -> Result<(), Error> { + let beacon_node_url = + SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?; + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + + let config_map = bn.get_config_spec::>().await?.data; + + let config_name = config_map + .get("CONFIG_NAME") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string()) + })? + .clone(); + + match config_map + .get("PRESET_BASE") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string()) + })? 
+ .to_lowercase() + .as_str() + { + MAINNET => { + let spec = WatchSpec::mainnet(config_name); + run_once(bn, spec, config).await + } + GNOSIS => { + let spec = WatchSpec::gnosis(config_name); + run_once(bn, spec, config).await + } + _ => unimplemented!("unsupported PRESET_BASE"), + } +} + +pub async fn run_once( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, +) -> Result<(), Error> { + let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?; + + let sync_data = watch.get_bn_syncing_status().await?; + if sync_data.is_syncing { + error!( + "Connected beacon node is still syncing: head_slot => {:?}, distance => {}", + sync_data.head_slot, sync_data.sync_distance + ); + return Err(Error::BeaconNodeSyncing); + } + + info!("Performing head update"); + let head_timer = Instant::now(); + watch.perform_head_update().await?; + let head_timer_elapsed = head_timer.elapsed(); + debug!("Head update complete, time taken: {head_timer_elapsed:?}"); + + info!("Performing block backfill"); + let block_backfill_timer = Instant::now(); + watch.backfill_canonical_slots().await?; + let block_backfill_timer_elapsed = block_backfill_timer.elapsed(); + debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}"); + + info!("Updating validator set"); + let validator_timer = Instant::now(); + watch.update_validator_set().await?; + let validator_timer_elapsed = validator_timer.elapsed(); + debug!("Validator update complete, time taken: {validator_timer_elapsed:?}"); + + // Update blocks after updating the validator set since the `proposer_index` must exist in the + // `validators` table. + info!("Updating unknown blocks"); + let unknown_block_timer = Instant::now(); + watch.update_unknown_blocks().await?; + let unknown_block_timer_elapsed = unknown_block_timer.elapsed(); + debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}"); + + // Run additional modules + if config.updater.attestations { + info!("Updating suboptimal attestations"); + let attestation_timer = Instant::now(); + watch.fill_suboptimal_attestations().await?; + watch.backfill_suboptimal_attestations().await?; + let attestation_timer_elapsed = attestation_timer.elapsed(); + debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}"); + } + + if config.updater.block_rewards { + info!("Updating block rewards"); + let rewards_timer = Instant::now(); + watch.fill_block_rewards().await?; + watch.backfill_block_rewards().await?; + let rewards_timer_elapsed = rewards_timer.elapsed(); + debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}"); + } + + if config.updater.block_packing { + info!("Updating block packing statistics"); + let packing_timer = Instant::now(); + watch.fill_block_packing().await?; + watch.backfill_block_packing().await?; + let packing_timer_elapsed = packing_timer.elapsed(); + debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}"); + } + + if config.blockprint.enabled { + info!("Updating blockprint"); + let blockprint_timer = Instant::now(); + watch.fill_blockprint().await?; + watch.backfill_blockprint().await?; + let blockprint_timer_elapsed = blockprint_timer.elapsed(); + debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}"); + } + + Ok(()) +} + +/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists. 
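+///
+/// Illustrative usage (not part of the API surface):
+/// ```ignore
+/// if let Some(header) = get_header(&bn, BlockId::Head).await? {
+///     println!("head is at slot {}", header.slot);
+/// }
+/// ```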
+pub async fn get_header(
+    bn: &BeaconNodeHttpClient,
+    block_id: BlockId,
+) -> Result<Option<BeaconBlockHeader>, Error> {
+    let resp = bn
+        .get_beacon_headers_block_id(block_id)
+        .await?
+        .map(|resp| (resp.data.root, resp.data.header.message));
+    // When querying with root == 0x000..., slot 0 will be returned with
+    // parent_root == 0x0000...
+    // This check escapes the loop.
+    if let Some((root, header)) = resp {
+        if root == header.parent_root {
+            return Ok(None);
+        } else {
+            return Ok(Some(header));
+        }
+    }
+    Ok(None)
+}
+
+pub async fn get_beacon_block<T: EthSpec>(
+    bn: &BeaconNodeHttpClient,
+    block_id: BlockId,
+) -> Result<Option<SignedBeaconBlock<T>>, Error> {
+    let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data);
+
+    Ok(block)
+}
+
+/// Queries the beacon node for the current validator set.
+pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result<HashSet<WatchValidator>, Error> {
+    let mut validator_map = HashSet::new();
+
+    let validators = bn
+        .get_beacon_states_validators(StateId::Head, None, None)
+        .await?
+        .ok_or(Error::NoValidatorsFound)?
+        .data;
+
+    for val in validators {
+        // Only store `activation_epoch` if it is not the `FAR_FUTURE_EPOCH`.
+        let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH {
+            None
+        } else {
+            Some(val.validator.activation_epoch.as_u64() as i32)
+        };
+        // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`.
+        let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH {
+            None
+        } else {
+            Some(val.validator.exit_epoch.as_u64() as i32)
+        };
+        validator_map.insert(WatchValidator {
+            index: val.index as i32,
+            public_key: WatchPK::from_pubkey(val.validator.pubkey),
+            status: val.status.to_string(),
+            activation_epoch,
+            exit_epoch,
+        });
+    }
+    Ok(validator_map)
+}
diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs
new file mode 100644
index 00000000000..acdda8c306a
--- /dev/null
+++ b/watch/tests/tests.rs
@@ -0,0 +1,1254 @@
+#![recursion_limit = "256"]
+#![cfg(unix)]
+
+use beacon_chain::test_utils::{
+    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+};
+use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
+use http_api::test_utils::{create_api_server, ApiServer};
+use network::NetworkReceivers;
+
+use rand::distributions::Alphanumeric;
+use rand::{thread_rng, Rng};
+use tokio::sync::oneshot;
+use types::{Hash256, MainnetEthSpec, Slot};
+use url::Url;
+use watch::{
+    client::WatchHttpClient,
+    config::Config,
+    database::{self, Config as DatabaseConfig, PgPool, WatchSlot},
+    server::{start_server, Config as ServerConfig},
+    updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec},
+};
+
+use log::error;
+use std::net::SocketAddr;
+use std::time::Duration;
+use tokio::{runtime, task::JoinHandle};
+use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls};
+use unused_port::unused_tcp4_port;
+
+use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage};
+
+type E = MainnetEthSpec;
+
+const VALIDATOR_COUNT: usize = 32;
+const SLOTS_PER_EPOCH: u64 = 32;
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
+
+fn build_test_config(config: &DatabaseConfig) -> PostgresConfig {
+    let mut postgres_config = PostgresConfig::new();
+    postgres_config
+        .user(&config.user)
+        .password(&config.password)
+        .dbname(&config.default_dbname)
+        .host(&config.host)
+        .port(config.port)
+        .connect_timeout(Duration::from_millis(config.connect_timeout_millis));
+    postgres_config
+}
+
+async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) {
+    let
db_config = build_test_config(config); + let (client, conn) = db_config + .connect(NoTls) + .await + .expect("Could not connect to db"); + let connection = runtime::Handle::current().spawn(async move { + if let Err(e) = conn.await { + error!("Connection error {:?}", e); + } + }); + + (client, connection) +} + +pub async fn create_test_database(config: &DatabaseConfig) { + let (db, _) = connect(config).await; + + db.execute(&format!("CREATE DATABASE {};", config.dbname), &[]) + .await + .expect("Database creation failed"); +} + +struct TesterBuilder { + pub harness: BeaconChainHarness>, + pub config: Config, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, +} + +impl TesterBuilder { + pub async fn new() -> TesterBuilder { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(); + + /* + * Spawn a Beacon Node HTTP API. + */ + let ApiServer { + server, + listening_socket: bn_api_listening_socket, + shutdown_tx: _bn_api_shutdown_tx, + network_rx: _bn_network_rx, + .. + } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + tokio::spawn(server); + + /* + * Create a watch configuration + */ + let database_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let config = Config { + database: DatabaseConfig { + dbname: random_dbname(), + port: database_port, + ..Default::default() + }, + server: ServerConfig { + listen_port: server_port, + ..Default::default() + }, + updater: UpdaterConfig { + beacon_node_url: format!( + "http://{}:{}", + bn_api_listening_socket.ip(), + bn_api_listening_socket.port() + ), + ..Default::default() + }, + ..Default::default() + }; + + Self { + harness, + config, + _bn_network_rx, + _bn_api_shutdown_tx, + } + } + pub async fn build(self, pool: PgPool) -> Tester { + /* + * Spawn a Watch HTTP API. + */ + let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { + let _ = watch_shutdown_rx.await; + }) + .unwrap(); + tokio::spawn(watch_server); + + let addr = SocketAddr::new( + self.config.server.listen_addr, + self.config.server.listen_port, + ); + + /* + * Create a HTTP client to talk to the watch HTTP API. + */ + let client = WatchHttpClient { + client: reqwest::Client::new(), + server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(), + }; + + /* + * Create a HTTP client to talk to the Beacon Node API. 
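+         * (This is the beacon node HTTP API spawned by the harness above; its URL was
+         * stored in `config.updater.beacon_node_url`.)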
+ */ + let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap(); + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + let spec = WatchSpec::mainnet("mainnet".to_string()); + + /* + * Build update service + */ + let updater = UpdateHandler::new(bn, spec, self.config.clone()) + .await + .unwrap(); + + Tester { + harness: self.harness, + client, + config: self.config, + updater, + _bn_network_rx: self._bn_network_rx, + _bn_api_shutdown_tx: self._bn_api_shutdown_tx, + _watch_shutdown_tx, + } + } + async fn initialize_database(&self) -> PgPool { + create_test_database(&self.config.database).await; + database::utils::run_migrations(&self.config.database); + database::build_connection_pool(&self.config.database) + .expect("Could not build connection pool") + } +} + +struct Tester { + pub harness: BeaconChainHarness>, + pub client: WatchHttpClient, + pub config: Config, + pub updater: UpdateHandler, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, + _watch_shutdown_tx: oneshot::Sender<()>, +} + +impl Tester { + /// Extend the chain on the beacon chain harness. Do not update the beacon watch database. + pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self { + self.harness.advance_slot(); + self.harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + // Advance the slot clock without a block. This results in a skipped slot. + pub fn skip_slot(&mut self) -> &mut Self { + self.harness.advance_slot(); + self + } + + // Perform a single slot re-org. + pub async fn reorg_chain(&mut self) -> &mut Self { + let previous_slot = self.harness.get_current_slot(); + self.harness.advance_slot(); + let first_slot = self.harness.get_current_slot(); + self.harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot, + first_slot, + }, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + /// Run the watch updater service. 
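+    /// Each run performs one full update cycle: head update, slot backfill, validator-set
+    /// update, block updates and any enabled optional modules. An illustrative chain:
+    /// ```ignore
+    /// tester.extend_chain(16).await.run_update_service(1).await;
+    /// ```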
+ pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { + for _ in 0..num_runs { + run_updater(self.config.clone()).await.unwrap(); + } + self + } + + pub async fn perform_head_update(&mut self) -> &mut Self { + self.updater.perform_head_update().await.unwrap(); + self + } + + pub async fn perform_backfill(&mut self) -> &mut Self { + self.updater.backfill_canonical_slots().await.unwrap(); + self + } + + pub async fn update_unknown_blocks(&mut self) -> &mut Self { + self.updater.update_unknown_blocks().await.unwrap(); + self + } + + pub async fn update_validator_set(&mut self) -> &mut Self { + self.updater.update_validator_set().await.unwrap(); + self + } + + pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater.fill_suboptimal_attestations().await.unwrap(); + + self + } + + pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater + .backfill_suboptimal_attestations() + .await + .unwrap(); + + self + } + + pub async fn fill_block_rewards(&mut self) -> &mut Self { + self.updater.fill_block_rewards().await.unwrap(); + + self + } + + pub async fn backfill_block_rewards(&mut self) -> &mut Self { + self.updater.backfill_block_rewards().await.unwrap(); + + self + } + + pub async fn fill_block_packing(&mut self) -> &mut Self { + self.updater.fill_block_packing().await.unwrap(); + + self + } + + pub async fn backfill_block_packing(&mut self) -> &mut Self { + self.updater.backfill_block_packing().await.unwrap(); + + self + } + + pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { + let lowest_slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .map(|slot| slot.slot.as_slot()); + + assert_eq!(lowest_slot, None); + + self + } + + pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_highest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { + self.client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { + assert!(self + .client + .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) + .await + .unwrap() + .is_none()); + self + } + + pub async fn assert_all_validators_exist(&mut self) -> &mut Self { + assert_eq!( + self.client + .get_all_validators() + .await + .unwrap() + .unwrap() + .len(), + VALIDATOR_COUNT + ); + self + } + + pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + 
+
+    pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self {
+        let lowest_slot = self
+            .client
+            .get_lowest_canonical_slot()
+            .await
+            .unwrap()
+            .map(|slot| slot.slot.as_slot());
+
+        assert_eq!(lowest_slot, None);
+
+        self
+    }
+
+    pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self {
+        let slot = self
+            .client
+            .get_lowest_canonical_slot()
+            .await
+            .unwrap()
+            .unwrap()
+            .slot
+            .as_slot();
+
+        assert_eq!(slot, Slot::new(expected));
+
+        self
+    }
+
+    pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self {
+        let slot = self
+            .client
+            .get_highest_canonical_slot()
+            .await
+            .unwrap()
+            .unwrap()
+            .slot
+            .as_slot();
+
+        assert_eq!(slot, Slot::new(expected));
+
+        self
+    }
+
+    pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self {
+        self.client
+            .get_lowest_canonical_slot()
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self {
+        assert!(self
+            .client
+            .get_beacon_blocks(BlockId::Slot(Slot::new(slot)))
+            .await
+            .unwrap()
+            .is_none());
+        self
+    }
+
+    pub async fn assert_all_validators_exist(&mut self) -> &mut Self {
+        assert_eq!(
+            self.client
+                .get_all_validators()
+                .await
+                .unwrap()
+                .unwrap()
+                .len(),
+            VALIDATOR_COUNT
+        );
+        self
+    }
+
+    pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self {
+        let mut block = self
+            .client
+            .get_lowest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        if block.slot.as_slot() == 0 {
+            block = self
+                .client
+                .get_next_beacon_block(block.root.as_hash())
+                .await
+                .unwrap()
+                .unwrap()
+        }
+
+        self.client
+            .get_proposer_info(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self {
+        let block = self
+            .client
+            .get_highest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        self.client
+            .get_proposer_info(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self {
+        let mut block = self
+            .client
+            .get_lowest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        if block.slot.as_slot() == 0 {
+            block = self
+                .client
+                .get_next_beacon_block(block.root.as_hash())
+                .await
+                .unwrap()
+                .unwrap()
+        }
+
+        self.client
+            .get_block_reward(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self {
+        let block = self
+            .client
+            .get_highest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        self.client
+            .get_block_reward(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self {
+        let mut block = self
+            .client
+            .get_lowest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        while block.slot.as_slot() <= SLOTS_PER_EPOCH {
+            block = self
+                .client
+                .get_next_beacon_block(block.root.as_hash())
+                .await
+                .unwrap()
+                .unwrap()
+        }
+
+        self.client
+            .get_block_packing(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
+
+    pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self {
+        let block = self
+            .client
+            .get_highest_beacon_block()
+            .await
+            .unwrap()
+            .unwrap();
+
+        self.client
+            .get_block_packing(BlockId::Root(block.root.as_hash()))
+            .await
+            .unwrap()
+            .unwrap();
+
+        self
+    }
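+
+    // Note: `rev_iter_block_roots_from` yields one (root, slot) entry per
+    // slot, repeating a block's root across any skip slots that follow it.
+    // For example (illustrative), blocks A at slot 0 and B at slot 2 with
+    // slot 1 skipped reverse to [(A, 0), (A, 1), (B, 2)], which `dedup_by`
+    // on the root collapses to [(A, 0), (B, 2)].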
+
+    /// Check that the canonical chain in watch matches that of the harness. Also check that all
+    /// canonical blocks can be retrieved.
+    pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self {
+        let head_root = self.harness.chain.head_beacon_block_root();
+        let mut chain: Vec<(Hash256, Slot)> = self
+            .harness
+            .chain
+            .rev_iter_block_roots_from(head_root)
+            .unwrap()
+            .map(Result::unwrap)
+            .collect();
+
+        // `chain` contains skip slots, but the `watch` API will not return blocks that do
+        // not exist, so we need to filter them out.
+        chain.reverse();
+        chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2);
+
+        // Remove any slots below `last_slot` since it is known that the database has not
+        // backfilled past it.
+        chain.retain(|(_, slot)| slot.as_u64() >= last_slot);
+
+        for (root, slot) in &chain {
+            let block = self
+                .client
+                .get_beacon_blocks(BlockId::Root(*root))
+                .await
+                .unwrap()
+                .unwrap();
+            assert_eq!(block.slot.as_slot(), *slot);
+        }
+
+        self
+    }
+
+    /// Check that every block in the `beacon_blocks` table has corresponding entries in the
+    /// `proposer_info`, `block_rewards` and `block_packing` tables.
+    pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self {
+        let pool = database::build_connection_pool(&self.config.database).unwrap();
+
+        let mut conn = database::get_connection(&pool).unwrap();
+        let highest_block_slot = database::get_highest_beacon_block(&mut conn)
+            .unwrap()
+            .unwrap()
+            .slot
+            .as_slot();
+        let lowest_block_slot = database::get_lowest_beacon_block(&mut conn)
+            .unwrap()
+            .unwrap()
+            .slot
+            .as_slot();
+        for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() {
+            let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot))
+                .unwrap()
+                .unwrap();
+            if !canonical_slot.skipped {
+                database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot))
+                    .unwrap()
+                    .unwrap();
+                database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot))
+                    .unwrap()
+                    .unwrap();
+                database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot))
+                    .unwrap()
+                    .unwrap();
+            }
+        }
+
+        self
+    }
+}
+
+pub fn random_dbname() -> String {
+    let mut s: String = thread_rng()
+        .sample_iter(&Alphanumeric)
+        .take(8)
+        .map(char::from)
+        .collect();
+    // Postgres folds unquoted identifiers to lowercase, so avoid capitals in database names.
+    s.make_ascii_lowercase();
+    format!("test_{}", s)
+}
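+
+// Each test below starts its own throwaway Postgres container via
+// `testcontainers`, mapping the container's port 5432 onto the port from the
+// test config. `random_dbname()` produces names such as "test_x3k9q2ab"
+// (illustrative) so that test databases created on a shared server do not
+// collide.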
+
+#[cfg(unix)]
+#[tokio::test]
+async fn short_chain() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    tester
+        .extend_chain(16)
+        .await
+        .assert_canonical_slots_empty()
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_canonical_slots_not_empty()
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn short_chain_sync_starts_on_skip_slot() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    tester
+        .skip_slot()
+        .skip_slot()
+        .extend_chain(6)
+        .await
+        .skip_slot()
+        .extend_chain(6)
+        .await
+        .skip_slot()
+        .assert_canonical_slots_empty()
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_canonical_slots_not_empty()
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn short_chain_with_skip_slot() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    tester
+        .extend_chain(5)
+        .await
+        .assert_canonical_slots_empty()
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_canonical_slots_not_empty()
+        .await
+        .assert_highest_canonical_slot(5)
+        .await
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await
+        .skip_slot()
+        .extend_chain(1)
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_highest_canonical_slot(7)
+        .await
+        .assert_slot_is_skipped(6)
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await;
+}
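+
+// Slot arithmetic for the re-org test below: `extend_chain(5)` produces
+// blocks at slots 1..=5, `skip_slot()` leaves slot 6 empty, `reorg_chain()`
+// advances the clock and builds a single fork block at slot 7, and the final
+// `extend_chain(1)` adds slot 8 -- hence a highest canonical slot of 8 with
+// slot 6 skipped.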
+
+#[cfg(unix)]
+#[tokio::test]
+async fn short_chain_with_reorg() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    tester
+        .extend_chain(5)
+        .await
+        .assert_canonical_slots_empty()
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_canonical_slots_not_empty()
+        .await
+        .assert_highest_canonical_slot(5)
+        .await
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await
+        .skip_slot()
+        .reorg_chain()
+        .await
+        .extend_chain(1)
+        .await
+        .run_update_service(1)
+        .await
+        .assert_all_validators_exist()
+        .await
+        .assert_highest_canonical_slot(8)
+        .await
+        .assert_slot_is_skipped(6)
+        .await
+        .assert_canonical_chain_consistent(0)
+        .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn chain_grows() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    // Apply four blocks to the chain.
+    tester
+        .extend_chain(4)
+        .await
+        .perform_head_update()
+        .await
+        // Head update should insert the head block.
+        .assert_highest_canonical_slot(4)
+        .await
+        // And also backfill to the epoch boundary.
+        .assert_lowest_canonical_slot(0)
+        .await
+        // Fill back to genesis.
+        .perform_backfill()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(4)
+        .await
+        // Apply one block to the chain.
+        .extend_chain(1)
+        .await
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(5)
+        .await
+        // Apply two blocks to the chain.
+        .extend_chain(2)
+        .await
+        // Update the head.
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(7)
+        .await
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn chain_grows_with_metadata() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    tester
+        // Apply four blocks to the chain.
+        .extend_chain(4)
+        .await
+        .perform_head_update()
+        .await
+        // Head update should insert the head block.
+        .assert_highest_canonical_slot(4)
+        .await
+        // And also backfill to the epoch boundary.
+        .assert_lowest_canonical_slot(0)
+        .await
+        // Fill back to genesis.
+        .perform_backfill()
+        .await
+        // Insert all validators.
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await
+        // Get other chain data.
+        // Backfill before forward fill to check that the ordering doesn't matter.
+        .backfill_block_rewards()
+        .await
+        .fill_block_rewards()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(4)
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await
+        // Apply one block to the chain.
+        .extend_chain(1)
+        .await
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(5)
+        .await
+        // Apply two blocks to the chain.
+        .extend_chain(2)
+        .await
+        // Update the head.
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(7)
+        .await
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await
+        // Get other chain data.
+        .fill_block_rewards()
+        .await
+        .backfill_block_rewards()
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await;
+}
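+
+// In the test below, the three consecutive `skip_slot()` calls leave slots
+// 5, 6 and 7 empty after the first four blocks, so the next block lands at
+// slot 8 and the two after it at slots 9 and 10, matching the
+// `assert_highest_canonical_slot` calls.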
+
+#[cfg(unix)]
+#[tokio::test]
+async fn chain_grows_with_metadata_and_multiple_skip_slots() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    // Apply four blocks to the chain.
+    tester
+        .extend_chain(4)
+        .await
+        .perform_head_update()
+        .await
+        // Head update should insert the head block.
+        .assert_highest_canonical_slot(4)
+        .await
+        // And also backfill to the epoch boundary.
+        .assert_lowest_canonical_slot(0)
+        .await
+        // Fill back to genesis.
+        .perform_backfill()
+        .await
+        // Insert all validators.
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await
+        // Get other chain data.
+        .fill_block_rewards()
+        .await
+        .backfill_block_rewards()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(4)
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await
+        // Add multiple skip slots.
+        .skip_slot()
+        .skip_slot()
+        .skip_slot()
+        // Apply one block to the chain.
+        .extend_chain(1)
+        .await
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(8)
+        .await
+        // Apply two blocks to the chain.
+        .extend_chain(2)
+        .await
+        // Update the head.
+        .perform_head_update()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(10)
+        .await
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await
+        // Get other chain data.
+        // Backfill before forward fill to check that the ordering doesn't matter.
+        .backfill_block_rewards()
+        .await
+        .fill_block_rewards()
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await;
+}
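+
+// A head update only backfills canonical slots to the closest prior epoch
+// boundary, so with mainnet's 32-slot epochs a head at slot 40 leaves the
+// lowest canonical slot at 40 - (40 % 32) = 32 until `perform_backfill`
+// fills the remainder. Block packing is also only asserted for blocks beyond
+// the first epoch (see `assert_lowest_block_has_block_packing`).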
+
+#[cfg(unix)]
+#[tokio::test]
+async fn chain_grows_to_second_epoch() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    // Apply 40 blocks to the chain.
+    tester
+        .extend_chain(40)
+        .await
+        .perform_head_update()
+        .await
+        // Head update should insert the head block.
+        .assert_highest_canonical_slot(40)
+        .await
+        // And also backfill to the epoch boundary.
+        .assert_lowest_canonical_slot(32)
+        .await
+        // Fill back to genesis.
+        .perform_backfill()
+        .await
+        // Insert all validators.
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await
+        // Get block packings.
+        .fill_block_packing()
+        .await
+        .backfill_block_packing()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(40)
+        .await
+        // All packings should be present.
+        .assert_lowest_block_has_block_packing()
+        .await
+        .assert_highest_block_has_block_packing()
+        .await
+        // Skip a slot.
+        .skip_slot()
+        // Apply two blocks to the chain.
+        .extend_chain(2)
+        .await
+        // Update the head.
+        .perform_head_update()
+        .await
+        // All blocks should be present.
+        .assert_lowest_canonical_slot(0)
+        .await
+        .assert_highest_canonical_slot(43)
+        .await
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // Update the new block packing.
+        // Backfill before forward fill to check that the ordering doesn't matter.
+        .backfill_block_packing()
+        .await
+        .fill_block_packing()
+        .await
+        // All packings should be present.
+        .assert_lowest_block_has_block_packing()
+        .await
+        .assert_highest_block_has_block_packing()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(0)
+        .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn large_chain() {
+    let builder = TesterBuilder::new().await;
+
+    let docker = Cli::default();
+    let image = RunnableImage::from(Postgres::default())
+        .with_mapped_port((builder.config.database.port, 5432));
+    let _node = docker.run(image);
+
+    let pool = builder.initialize_database().await;
+    let mut tester = builder.build(pool).await;
+
+    // Apply 400 blocks to the chain.
+    tester
+        .extend_chain(400)
+        .await
+        .perform_head_update()
+        .await
+        // Head update should insert the head block.
+        .assert_highest_canonical_slot(400)
+        .await
+        // And also backfill to the epoch boundary.
+        .assert_lowest_canonical_slot(384)
+        .await
+        // Backfill 2 epochs, as per the default config.
+        .perform_backfill()
+        .await
+        // Insert all validators.
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(384)
+        .await
+        // Get block rewards and proposer info.
+        .fill_block_rewards()
+        .await
+        .backfill_block_rewards()
+        .await
+        // Get block packings.
+        .fill_block_packing()
+        .await
+        .backfill_block_packing()
+        .await
+        // Should have backfilled 2 more epochs.
+        .assert_lowest_canonical_slot(320)
+        .await
+        .assert_highest_canonical_slot(400)
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await
+        // All packings should be present.
+        .assert_lowest_block_has_block_packing()
+        .await
+        .assert_highest_block_has_block_packing()
+        .await
+        // Skip a slot.
+        .skip_slot()
+        // Apply two blocks to the chain.
+        .extend_chain(2)
+        .await
+        // Update the head.
+        .perform_head_update()
+        .await
+        .perform_backfill()
+        .await
+        // Should have backfilled 2 more epochs.
+        .assert_lowest_canonical_slot(256)
+        .await
+        .assert_highest_canonical_slot(403)
+        .await
+        // Update validators.
+        .update_validator_set()
+        .await
+        // Insert all blocks.
+        .update_unknown_blocks()
+        .await
+        // All validators should be present.
+        .assert_all_validators_exist()
+        .await
+        // Get suboptimal attestations.
+        .fill_suboptimal_attestations()
+        .await
+        .backfill_suboptimal_attestations()
+        .await
+        // Get block rewards and proposer info.
+        .fill_block_rewards()
+        .await
+        .backfill_block_rewards()
+        .await
+        // Get block packing.
+        // Backfill before forward fill to check that the ordering doesn't matter.
+        .backfill_block_packing()
+        .await
+        .fill_block_packing()
+        .await
+        // All rewards should be present.
+        .assert_lowest_block_has_block_rewards()
+        .await
+        .assert_highest_block_has_block_rewards()
+        .await
+        // All proposers should be present.
+        .assert_lowest_block_has_proposer_info()
+        .await
+        .assert_highest_block_has_proposer_info()
+        .await
+        // All packings should be present.
+        .assert_lowest_block_has_block_packing()
+        .await
+        .assert_highest_block_has_block_packing()
+        .await
+        // Check the chain is consistent.
+        .assert_canonical_chain_consistent(256)
+        .await
+        // Check every block has rewards, proposer info and packing statistics.
+        .assert_all_blocks_have_metadata()
+        .await;
+}