diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 255a78ce56..afb3d67763 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -44,4 +44,5 @@ crates/tokio-util/ @fgimenez @emhane
 crates/tracing/ @onbjerg
 crates/transaction-pool/ @mattsse
 crates/trie/ @rkrasiuk @Rjected
+etc/ @Rjected @onbjerg @shekhirin
 .github/ @onbjerg @gakonst @DaniPopes
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 709cb6d01b..8fdf38d1a2 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -45,6 +45,7 @@ exclude_crates=(
   reth-net-nat
   reth-network
   reth-node-api
+  reth-node-types
   reth-node-builder
   reth-node-core
   reth-node-ethereum
@@ -68,18 +69,16 @@ exclude_crates=(
   reth-stages
   reth-storage-errors
   # The following are not supposed to be working
-  reth # all of the crates below
-  reth-db # mdbx
+  reth # all of the crates below
+  reth-invalid-block-hooks # reth-provider
   reth-libmdbx # mdbx
   reth-mdbx-sys # mdbx
-  reth-nippy-jar # sucds
-  reth-provider # reth-db, reth-nippy-jar
-  reth-prune # reth-db
+  reth-provider # tokio
+  reth-prune # tokio
   reth-stages-api # reth-provider, reth-prune
-  reth-static-file # reth-nippy-jar
+  reth-static-file # tokio
   reth-transaction-pool # c-kzg
-  reth-trie-db # reth-db
-  reth-trie-parallel # reth-db
+  reth-trie-parallel # tokio
 )
 
 # Array to hold the results
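For context, a minimal sketch of how an exclusion array like the one above is typically consumed. This is an illustration, not the verbatim contents of check_wasm.sh: it assumes the script enumerates workspace members with cargo metadata and jq (the reworked wasm job in lint.yml below installs jq via dcarbone/install-jq-action, which fits this pattern) and then type-checks each non-excluded crate against the wasm32-wasip1 target.

#!/usr/bin/env bash
# Hypothetical sketch: compile-check every workspace crate for
# wasm32-wasip1, skipping the crates listed in exclude_crates.
set -eo pipefail

exclude_crates=(reth-node-types reth-provider) # abbreviated for the sketch

# Enumerate workspace members via cargo metadata + jq.
crates=$(cargo metadata --format-version 1 --no-deps | jq -r '.packages[].name')

results=()
for crate in $crates; do
  if [[ " ${exclude_crates[*]} " == *" $crate "* ]]; then
    continue # known-incompatible, see the array above
  fi
  if cargo check -p "$crate" --target wasm32-wasip1 >/dev/null 2>&1; then
    results+=("PASS $crate")
  else
    results+=("FAIL $crate")
  fi
done
printf '%s\n' "${results[@]}"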
diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index b82c9d131e..7633eac7ff 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -13,6 +13,13 @@ rpc-compat:
   - eth_getTransactionReceipt/get-access-list (reth)
   - eth_getTransactionReceipt/get-blob-tx (reth)
   - eth_getTransactionReceipt/get-dynamic-fee (reth)
+  - eth_getBlockByHash/get-block-by-hash (reth)
+  - eth_getBlockByNumber/get-block-n (reth)
+  - eth_getBlockByNumber/get-finalized (reth)
+  - eth_getBlockByNumber/get-genesis (reth)
+  - eth_getBlockByNumber/get-latest (reth)
+  - eth_getBlockByNumber/get-safe (reth)
+  - eth_sendRawTransaction/send-blob-tx (reth)
 
 # https://github.com/paradigmxyz/reth/issues/8732
 engine-withdrawals:
@@ -63,8 +70,6 @@ engine-cancun:
   - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth)
   - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth)
   - Invalid NewPayload, ParentBeaconBlockRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
-  - Invalid NewPayload, BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
-  - Invalid NewPayload, Blob Count on BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
   - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
 
 # https://github.com/paradigmxyz/reth/issues/8579
diff --git a/.github/assets/hive/expected_failures_experimental.yaml b/.github/assets/hive/expected_failures_experimental.yaml
index 66a68f0c67..3209902bb9 100644
--- a/.github/assets/hive/expected_failures_experimental.yaml
+++ b/.github/assets/hive/expected_failures_experimental.yaml
@@ -13,6 +13,13 @@ rpc-compat:
   - eth_getTransactionReceipt/get-access-list (reth)
   - eth_getTransactionReceipt/get-blob-tx (reth)
   - eth_getTransactionReceipt/get-dynamic-fee (reth)
+  - eth_getBlockByHash/get-block-by-hash (reth)
+  - eth_getBlockByNumber/get-block-n (reth)
+  - eth_getBlockByNumber/get-finalized (reth)
+  - eth_getBlockByNumber/get-genesis (reth)
+  - eth_getBlockByNumber/get-latest (reth)
+  - eth_getBlockByNumber/get-safe (reth)
+  - eth_sendRawTransaction/send-blob-tx (reth)
 
 # https://github.com/paradigmxyz/reth/issues/8732
 engine-withdrawals:
@@ -37,12 +44,7 @@ engine-withdrawals:
 
 # https://github.com/paradigmxyz/reth/issues/8305
 # https://github.com/paradigmxyz/reth/issues/6217
-engine-api:
-  - Re-org to Previously Validated Sidechain Payload (Paris) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=False, Invalid P9 (Paris) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=True, Invalid P9 (Paris) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=False, Invalid P10 (Paris) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=True, Invalid P10 (Paris) (reth)
+engine-api: []
 
 # https://github.com/paradigmxyz/reth/issues/8305
 # https://github.com/paradigmxyz/reth/issues/6217
@@ -51,14 +53,11 @@ engine-api:
 engine-cancun:
   - Blob Transaction Ordering, Multiple Clients (Cancun) (reth)
   - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth)
-  - Invalid NewPayload, BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
-  - Invalid NewPayload, Blob Count on BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
   - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
-  - Re-org to Previously Validated Sidechain Payload (Cancun) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=False, Invalid P9 (Cancun) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=True, Invalid P9 (Cancun) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=False, Invalid P10 (Cancun) (reth)
-  - Invalid Missing Ancestor ReOrg, StateRoot, EmptyTxs=True, Invalid P10 (Cancun) (reth)
+  - Invalid NewPayload, VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
+  - Invalid NewPayload, VersionedHashes Version, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
+  - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
+  - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
 
 # https://github.com/paradigmxyz/reth/issues/8579
 sync:
diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml
index 22bfeafc8c..ed4b131ce5 100644
--- a/.github/workflows/dependencies.yml
+++ b/.github/workflows/dependencies.yml
@@ -54,7 +54,7 @@ jobs:
           echo "EOF" >> $GITHUB_OUTPUT
 
       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
        with:
          add-paths: ./Cargo.lock
          commit-message: ${{ steps.msg.outputs.commit_message }}
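The job above only commits ./Cargo.lock, so its effect can be previewed locally with stock cargo commands; nothing here is specific to the workflow:

cargo update                      # refresh Cargo.lock to the latest compatible versions
git diff --stat Cargo.lock        # inspect the churn before it becomes a PR
cargo update -p alloy-primitives  # or bump a single package, as the alloy entries in the lockfile diff below illustrate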
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 50fb9ffc71..2a0f6736d3 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -8,8 +8,8 @@ on:
 
 env:
   CARGO_TERM_COLOR: always
-  TOOL_CHAIN: "1.80"
-  TOOL_CHAIN_NIGHTLY: "nightly-2024-09-01"
+  TOOL_CHAIN: "1.81"
+  TOOL_CHAIN_NIGHTLY: "nightly-2024-09-09"
 
 jobs:
   clippy-binaries:
@@ -40,21 +40,38 @@
     env:
       RUSTFLAGS: -D warnings
 
-  # wasm:
-  #   runs-on: ubuntu-latest
-  #   timeout-minutes: 30
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - uses: dtolnay/rust-toolchain@stable
-  #       with:
-  #         target: wasm32-wasip1
-  #         toolchain: ${{ env.TOOL_CHAIN }}
-  #     - uses: taiki-e/install-action@cargo-hack
-  #     - uses: Swatinem/rust-cache@v2
-  #       with:
-  #         cache-on-failure: true
-  #     - name: Run Wasm checks
-  #       run: .github/assets/check_wasm.sh ${{ env.TOOL_CHAIN }}
+  # clippy:
+  #   name: clippy
+  #   runs-on: ubuntu-latest
+  #   timeout-minutes: 30
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: dtolnay/rust-toolchain@nightly
+  #       with:
+  #         toolchain: ${{ env.TOOL_CHAIN_NIGHTLY }}
+  #         components: clippy
+  #     - uses: Swatinem/rust-cache@v2
+  #       with:
+  #         cache-on-failure: true
+  #     - run: cargo clippy --workspace --lib --examples --tests --benches --all-features --locked
+  #       env:
+  #         RUSTFLAGS: -D warnings
+
+  # wasm:
+  #   runs-on: ubuntu-latest
+  #   timeout-minutes: 30
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: dtolnay/rust-toolchain@stable
+  #       with:
+  #         target: wasm32-wasip1
+  #     - uses: taiki-e/install-action@cargo-hack
+  #     - uses: Swatinem/rust-cache@v2
+  #       with:
+  #         cache-on-failure: true
+  #     - uses: dcarbone/install-jq-action@v2
+  #     - name: Run Wasm checks
+  #       run: .github/assets/check_wasm.sh
 
   crate-checks:
     runs-on: ubuntu-latest
@@ -68,7 +85,8 @@
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
-      - run: cargo hack check
+      - run: cargo hack check --workspace --exclude op-reth
+      - run: cargo check -p op-reth --features "optimism opbnb"
 
  msrv:
    name: MSRV / ${{ matrix.network }}
@@ -110,6 +128,39 @@
          toolchain: ${{ env.TOOL_CHAIN_NIGHTLY }} # fmt needs the nightly flag, please update this if really needed
      - run: cargo fmt --all --check
 
+  # udeps:
+  #   name: udeps
+  #   runs-on: ubuntu-latest
+  #   timeout-minutes: 30
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: dtolnay/rust-toolchain@nightly
+  #     - uses: Swatinem/rust-cache@v2
+  #       with:
+  #         cache-on-failure: true
+  #     - uses: taiki-e/install-action@cargo-udeps
+  #     - run: cargo udeps --workspace --lib --examples --tests --benches --all-features --locked
+
+  # book:
+  #   name: book
+  #   runs-on: ubuntu-latest
+  #   timeout-minutes: 30
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: dtolnay/rust-toolchain@nightly
+  #     - uses: dtolnay/rust-toolchain@master
+  #       with:
+  #         toolchain: ${{ env.TOOL_CHAIN }} # MSRV
+  #     - uses: Swatinem/rust-cache@v2
+  #       with:
+  #         cache-on-failure: true
+  #     - run: cargo build --bin reth --workspace --features ethereum
+  #       env:
+  #         RUSTFLAGS: -D warnings
+  #     - run: ./book/cli/update.sh target/debug/reth
+  #     - name: Check book changes
+  #       run: git diff --exit-code
+
  codespell:
    runs-on: ubuntu-latest
    timeout-minutes: 30
@@ -163,6 +214,8 @@
      - clippy-binaries
      - crate-checks
      - fmt
+      # - udeps
+      # - book
      - codespell
      - grafana
      - no-test-deps
diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml
index 26fa68d7f9..2a223391d7 100644
--- a/.github/workflows/op-sync.yml
+++ b/.github/workflows/op-sync.yml
@@ -33,8 +33,7 @@ jobs:
        with:
          cache-on-failure: true
      - name: Build op-reth
-        run: |
-          cargo install --features asm-keccak,jemalloc,optimism --bin op-reth --path bin/reth
+        run: make install-op
      - name: Run sync
        # https://basescan.org/block/10000
        run: |
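One note on the op-sync change: the workflow now delegates the build to a Makefile target instead of an inline cargo install. Assuming install-op wraps an invocation like the one it replaces (plausible, but not verified against the Makefile here), the two forms are equivalent, with the feature flags maintained in one place:

# Before: built inline by the workflow
cargo install --features asm-keccak,jemalloc,optimism --bin op-reth --path bin/reth
# After: one entry point shared by CI and local builds
make install-op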
name = "adler2" version = "2.0.0" @@ -103,8 +97,8 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.18" -source = "git+https://github.com/bnb-chain/alloy-chains-rs.git?tag=v1.0.0#b7c5379cf47345181f8dce350acafb958f47152a" +version = "0.1.32" +source = "git+https://github.com/bnb-chain/alloy-chains-rs.git?rev=6be74c75424a31a0d98a906084c778a9d74769fc#6be74c75424a31a0d98a906084c778a9d74769fc" dependencies = [ "alloy-rlp", "arbitrary", @@ -116,11 +110,11 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "alloy-serde", "arbitrary", @@ -130,16 +124,16 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5b68572f5dfa99ede0a491d658c9842626c956b840d0b97d0bbc9637742504" +checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" dependencies = [ "alloy-json-abi", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-sol-type-parser", "alloy-sol-types", "const-hex", - "derive_more 0.99.18", + "derive_more 1.0.0", "itoa", "serde", "serde_json", @@ -152,7 +146,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "arbitrary", "rand 0.8.5", @@ -165,7 +159,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "arbitrary", "k256", @@ -175,12 +169,12 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-eip2930", "alloy-eip7702", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "alloy-serde", "arbitrary", @@ -193,32 +187,32 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "210f4b358d724f85df8adaec753c583defb58169ad3cad3d48c80d1a25a6ff0e" +checksum = "3a7a18afb0b318616b6b2b0e2e7ac5529d32a966c673b48091c9919e284e6aca" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-serde", "serde", ] [[package]] name = "alloy-genesis" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-serde", "serde", ] 
[[package]] name = "alloy-json-abi" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "299d2a937b6c60968df3dad2a988b0f0e03277b344639a4f7a31bd68e6285e59" +checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-sol-type-parser", "serde", "serde_json", @@ -226,10 +220,10 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-sol-types", "serde", "serde_json", @@ -239,14 +233,14 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", @@ -259,23 +253,25 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-eips", + "alloy-primitives", "alloy-serde", "serde", ] [[package]] name = "alloy-node-bindings" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2657dae91ae61ed6cdd4c58b7e09330de934eea4e14d2f54f72f2a6720b23437" +checksum = "5988a227293f949525f0a1b3e1ef728d2ef24afa96bad2b7788c6c9617fa3eec" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "k256", + "rand 0.8.5", "serde_json", "tempfile", "thiserror", @@ -285,56 +281,39 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" -dependencies = [ - "alloy-rlp", - "bytes 1.7.1", - "cfg-if", - "const-hex", - "derive_more 0.99.18", - "hex-literal", - "itoa", - "k256", - "keccak-asm", - "proptest", - "rand 0.8.5", - "ruint", - "serde", - "tiny-keccak", -] - -[[package]] -name = "alloy-primitives" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a767e59c86900dd7c3ce3ecef04f3ace5ac9631ee150beb8b7d22f7fa3bbb2d7" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" dependencies = [ "alloy-rlp", "arbitrary", - "bytes 1.7.1", + "bytes 1.7.2", "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 0.99.18", + "derive_more 1.0.0", "getrandom 0.2.15", + 
"hashbrown 0.14.5", "hex-literal", + "indexmap 2.5.0", "itoa", "k256", "keccak-asm", + "paste", "proptest", "proptest-derive", "rand 0.8.5", "ruint", + "rustc-hash 2.0.0", "serde", + "sha3 0.10.8", "tiny-keccak", ] [[package]] name = "alloy-provider" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-chains", "alloy-consensus", @@ -342,19 +321,19 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-network-primitives", - "alloy-primitives 0.8.0", - "alloy-pubsub 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-primitives", + "alloy-pubsub 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "alloy-rpc-client", - "alloy-rpc-types-admin 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-rpc-types-admin 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-transport-ws 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "async-stream", "async-trait", "auto_impl", - "dashmap 6.0.1", + "dashmap 6.1.0", "futures", "futures-utils-wasm", "lru", @@ -370,12 +349,12 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c59e13200322138fe4279b4676b0d78c4f55502de127f5a448495d3ddfaa43" +checksum = "2d05f63677e210d758cd5d6d1ce10f20c980c3560ccfbe79ba1997791862a04f" dependencies = [ "alloy-json-rpc", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-transport", "bimap", "futures", @@ -383,17 +362,17 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", ] [[package]] name = "alloy-pubsub" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-json-rpc", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-transport", "bimap", "futures", @@ -401,7 +380,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", ] @@ -413,7 +392,7 @@ checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" dependencies = [ "alloy-rlp-derive", "arrayvec", - "bytes 1.7.1", + "bytes 1.7.2", ] [[package]] @@ -424,20 +403,20 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "alloy-rpc-client" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = 
"git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-json-rpc", - "alloy-primitives 0.8.0", - "alloy-pubsub 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-primitives", + "alloy-pubsub 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-transport-ws 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "futures", "pin-project", "reqwest 0.12.7", @@ -445,118 +424,128 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-rpc-types" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d758f65aa648491c6358335c578de45cd7de6fdf2877c3cef61f2c9bebea21" +checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-rpc-types-trace", "alloy-serde", "serde", ] [[package]] name = "alloy-rpc-types-admin" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e41c33bbddaec71ca1bd7a4df38f95f408ef4fa3b3c29a7e9cc8d0e43be5fbe" +checksum = "fefd12e99dd6b7de387ed13ad047ce2c90d8950ca62fc48b8a457ebb8f936c61" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "serde", "serde_json", ] [[package]] name = "alloy-rpc-types-admin" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-genesis 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", + "alloy-primitives", "serde", "serde_json", ] [[package]] name = "alloy-rpc-types-anvil" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5ee4ffe3e687a6372dd02e998f4f65e512ffdfe0d2c248db822649814c36cd" +checksum = "d25cb45ad7c0930dd62eecf164d2afe4c3d2dd2c82af85680ad1f118e1e5cb83" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-serde", "serde", ] [[package]] name = "alloy-rpc-types-beacon" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173bf0239a59d3616f4f4ab1682de25dd30b13fb8f52bf7ee7503729354f3c4" +checksum = "2e7081d2206dca51ce23a06338d78d9b536931cc3f15134fc1c6535eb2b77f18" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types-engine", "serde", "serde_with", "thiserror", ] +[[package]] +name = "alloy-rpc-types-debug" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f9f9033796bb3078d11cc9c839f00e277431ef997db2849a46045fcffee3835" +dependencies = [ + "alloy-primitives", + "serde", +] + [[package]] name = "alloy-rpc-types-engine" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", - "alloy-rpc-types-eth", "alloy-serde", + "derive_more 1.0.0", "jsonrpsee-types", "jsonwebtoken", "rand 0.8.5", "serde", - "thiserror", ] [[package]] name = "alloy-rpc-types-eth" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-network-primitives", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-sol-types", "arbitrary", + "cfg-if", + "derive_more 1.0.0", + "hashbrown 0.14.5", "itertools 0.13.0", "jsonrpsee-types", "serde", "serde_json", - "thiserror", ] [[package]] name = "alloy-rpc-types-mev" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a0b28949d1077826684b5912fe9ab1c752a863af0419b1ba9abff19006d61b1" +checksum = "922d92389e5022650c4c60ffd2f9b2467c3f853764f0f74ff16a23106f9017d5" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-serde", "serde", "serde_json", @@ -564,11 +553,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2af822ed58f2b6dd7cfccf88bf69f42c9a8cbf4663316227646a8a3e5a591f" +checksum = "98db35cd42c90b484377e6bc44d95377a7a38a5ebee996e67754ac0446d542ab" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", @@ -578,11 +567,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a8fbdf39e93a9b213df39541be51671e93e6e8b142c3602ddb4ff6219a1bc85" +checksum = "6bac37082c3b21283b3faf5cc0e08974272aee2f756ce1adeb26db56a5fce0d5" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", @@ -590,10 +579,10 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "arbitrary", "serde", "serde_json", @@ -601,10 +590,10 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = 
"git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "async-trait", "auto_impl", "elliptic-curve", @@ -614,12 +603,12 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-consensus", "alloy-network", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-signer", "async-trait", "coins-bip32", @@ -631,56 +620,56 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183bcfc0f3291d9c41a3774172ee582fb2ce6eb6569085471d8f225de7bb86fc" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c4d842beb7a6686d04125603bc57614d5ed78bf95e4753274db3db4ba95214" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" dependencies = [ "alloy-sol-macro-input", "const-hex", - "heck 0.5.0", - "indexmap 2.4.0", - "proc-macro-error", + "heck", + "indexmap 2.5.0", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1306e8d3c9e6e6ecf7a39ffaf7291e73a5f655a2defd366ee92c2efebcdf7fee" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" dependencies = [ "const-hex", "dunce", - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4691da83dce9c9b4c775dd701c87759f173bd3021cbf2e60cde00c5fe6d7241" +checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" dependencies = [ "serde", "winnow", @@ -688,12 +677,12 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "577e262966e92112edbd15b1b2c0947cc434d6e8311df96d3329793fe8047da9" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" dependencies = [ "alloy-json-abi", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-sol-macro", "const-hex", "serde", @@ -701,8 +690,8 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -712,35 +701,35 
@@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-transport-http" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.7", "serde_json", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-transport-ipc" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8653c47dcc30326fb09a34140e8800fa21987fc52453de6cfcdd5c7b8b6e9886" +checksum = "09fd8491249f74d16ec979b1f5672377b12ebb818e6056478ffa386954dbd350" dependencies = [ "alloy-json-rpc", - "alloy-pubsub 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-pubsub 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-transport", - "bytes 1.7.1", + "bytes 1.7.2", "futures", "interprocess", "pin-project", @@ -752,15 +741,15 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d43ba8e9a3a7fef626d5fd93cc87ff2d6d2c81acfb866f068b3dce31dda060" +checksum = "a9704761f6297fe482276bee7f77a93cb42bd541c2bd6c1c560b6f3a9ece672e" dependencies = [ - "alloy-pubsub 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-pubsub 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-transport", "futures", "http 1.1.0", - "rustls 0.23.12", + "rustls 0.23.13", "serde_json", "tokio", "tokio-tungstenite", @@ -770,14 +759,14 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.3.0" -source = "git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619#133e5b3dd8af4bc1ceb9018be42c156727eef619" +version = "0.3.6" +source = "git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41#718aee579dc000019582245226eebf8b40d24c41" dependencies = [ - "alloy-pubsub 0.3.0 (git+https://github.com/bnb-chain/alloy?rev=133e5b3dd8af4bc1ceb9018be42c156727eef619)", + "alloy-pubsub 0.3.6 (git+https://github.com/bnb-chain/alloy?rev=718aee579dc000019582245226eebf8b40d24c41)", "alloy-transport", "futures", "http 1.1.0", - "rustls 0.23.12", + "rustls 0.23.13", "serde_json", "tokio", "tokio-tungstenite", @@ -787,11 +776,11 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.5.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd491aade72a82d51db430379f48a44a1d388ff03711a2023f1faa302c5b675d" +checksum = "0a46c9c4fdccda7982e7928904bd85fe235a0404ee3d7e197fff13d61eac8b4f" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "arbitrary", "derive_arbitrary", @@ -886,9 +875,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "aquamarine" @@ -901,7 +890,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -976,7 +965,7 @@ dependencies = [ 
"num-bigint", "num-traits", "paste", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] @@ -1093,9 +1082,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -1175,18 +1164,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1197,7 +1186,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -1224,7 +1213,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1247,17 +1236,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -1334,15 +1323,9 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.76", + "syn 2.0.77", ] -[[package]] -name = "binout" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b60b1af88a588fca5fe424ae7d735bc52814f80ff57614f57043cc4e2024f2ea" - [[package]] name = "bit-set" version = "0.5.3" @@ -1374,15 +1357,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bitm" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06e8e5bec3490b9f6f3adbb78aa4f53e8396fd9994e8a62a346b44ea7c15f35" -dependencies = [ - "dyn_size_of", -] - [[package]] name = "bitset" version = "0.1.2" @@ -1481,23 +1455,23 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49637e7ecb7c541c46c3e885d4c49326ad8076dbfb88bef2cf3165d8ea7df2b" +checksum = "3a69ee3a749ea36d4e56d92941e7b25076b493d4917c3d155b6cf369e23547d9" dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.4.0", + "indexmap 2.5.0", "num-bigint", "rustc-hash 2.0.0", ] [[package]] name = "boa_engine" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "411558b4cbc7d0303012e26721815e612fed78179313888fd5dd8d6c50d70099" +checksum = "06e4559b35b80ceb2e6328481c0eca9a24506663ea33ee1e279be6b5b618b25c" dependencies = [ "arrayvec", "bitflags 2.6.0", @@ -1514,7 +1488,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.4.0", + "indexmap 2.5.0", 
"intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1540,9 +1514,9 @@ dependencies = [ [[package]] name = "boa_gc" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eff345a85a39cf9b8ed863198947d61e6df2b1d774002b57341158b0ce2c525" +checksum = "716406f57d67bc3ac7fd227d5513b42df401dff14a3be22cbd8ee29817225363" dependencies = [ "boa_macros", "boa_profiler", @@ -1553,14 +1527,14 @@ dependencies = [ [[package]] name = "boa_interner" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b779280420804c70da9043d152c84eb96e2f7c9e7d1ec3262decf59f9349df" +checksum = "4e18df2272616e1ba0322a69333d37dbb78797f1aa0595aad9dc41e8ecd06ad9" dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.4.0", + "indexmap 2.5.0", "once_cell", "phf", "rustc-hash 2.0.0", @@ -1569,21 +1543,21 @@ dependencies = [ [[package]] name = "boa_macros" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e0097fa69cde4c95f9869654004340fbbe2bcf3ce9189ba2a31a65ac40e0a1" +checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "synstructure 0.13.1", ] [[package]] name = "boa_parser" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd63fe8faf62561fc8c50f9402687e8cfde720b57d292fb3b4ac17c821878ac1" +checksum = "62b59dc05bf1dc019b11478a92986f590cff43fced4d20e866eefb913493e91c" dependencies = [ "bitflags 2.6.0", "boa_ast", @@ -1600,15 +1574,15 @@ dependencies = [ [[package]] name = "boa_profiler" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9da895f0df9e2a97b36c1f98e0c5d2ab963abc8679d80f2a66f7bcb211ce90" +checksum = "00ee0645509b3b91abd724f25072649d9e8e65653a78ff0b6e592788a58dd838" [[package]] name = "boa_string" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9ca6668df83fcd3c2903f6f296b7180421908c5b478ebe0d1c468be9fd60e1c" +checksum = "ae85205289bab1f2c7c8a30ddf0541cf89ba2ff7dbd144feef50bbfa664288d4" dependencies = [ "fast-float", "paste", @@ -1699,9 +1673,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] @@ -1714,7 +1688,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1731,9 +1705,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1808,9 +1782,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.15" +version = "1.1.21" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1903,9 +1877,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1913,9 +1887,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -1925,14 +1899,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2004,7 +1978,7 @@ version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "memchr", ] @@ -2013,7 +1987,7 @@ name = "cometbft" version = "0.1.0-alpha.2" source = "git+https://github.com/bnb-chain/greenfield-cometbft-rs.git?rev=1282547#12825470f8b2df8e960e39a84ad4c9921492f512" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "cometbft-proto", "digest 0.10.7", "ed25519 2.2.3", @@ -2090,7 +2064,7 @@ name = "cometbft-proto" version = "0.1.0-alpha.2" source = "git+https://github.com/bnb-chain/greenfield-cometbft-rs.git?rev=1282547#12825470f8b2df8e960e39a84ad4c9921492f512" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "flex-error", "num-derive 0.4.2", "num-traits", @@ -2108,7 +2082,7 @@ version = "0.1.0-alpha.2" source = "git+https://github.com/bnb-chain/greenfield-cometbft-rs.git?rev=1282547#12825470f8b2df8e960e39a84ad4c9921492f512" dependencies = [ "async-trait", - "bytes 1.7.1", + "bytes 1.7.2", "cometbft", "cometbft-config", "cometbft-proto", @@ -2210,9 +2184,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" dependencies = [ "const_format_proc_macros", "konst", @@ -2220,9 +2194,9 @@ dependencies = [ [[package]] name = "const_format_proc_macros" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ "proc-macro2", "quote", @@ -2240,12 +2214,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "convert_case" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "convert_case" version = "0.6.0" @@ -2282,18 +2250,18 @@ dependencies = [ [[package]] name = "cpp_demangle" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" dependencies = [ "cfg-if", ] [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -2488,20 +2456,6 @@ dependencies = [ "cipher", ] -[[package]] -name = "cuckoofilter" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" -dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", - "serde", - "serde_bytes", - "serde_derive", -] - [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -2526,7 +2480,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -2539,7 +2493,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2576,7 +2530,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2587,7 +2541,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2605,9 +2559,9 @@ dependencies = [ [[package]] name = "dashmap" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", "crossbeam-utils", @@ -2707,7 +2661,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2716,11 +2670,9 @@ version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ - "convert_case 0.4.0", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2738,13 +2690,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "convert_case 0.6.0", + "convert_case", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "unicode-xid", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.8.1" @@ -2858,7 +2816,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.76", + "syn 2.0.77", ] [[package]] @@ -2885,12 +2843,6 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" -[[package]] -name = "dyn_size_of" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d4f78a40b1ec35bf8cafdaaf607ba2f773c366b0b3bda48937cacd7a8d5134" - [[package]] name = "ecdsa" version = "0.16.9" @@ -2972,6 +2924,7 @@ dependencies = [ name = "ef-tests" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "rayon", "reth-chainspec", @@ -3041,7 +2994,7 @@ checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" dependencies = [ "alloy-rlp", "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "ed25519-dalek 2.1.1", "hex 0.4.3", "k256", @@ -3055,14 +3008,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3073,7 +3026,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3164,7 +3117,7 @@ dependencies = [ name = "example-custom-engine-types" version = "0.0.0" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "eyre", "reth", "reth-basic-payload-builder", @@ -3186,7 +3139,7 @@ dependencies = [ name = "example-custom-evm" version = "0.0.0" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "eyre", "reth", "reth-chainspec", @@ -3265,6 +3218,8 @@ dependencies = [ "eyre", "reth-chainspec", "reth-db", + "reth-node-ethereum", + "reth-node-types", "reth-primitives", "reth-provider", "reth-rpc-types", @@ -3355,7 +3310,6 @@ dependencies = [ "reth", "reth-chainspec", "reth-db", - "reth-db-api", "reth-node-ethereum", "reth-provider", "tokio", @@ -3365,7 +3319,7 @@ dependencies = [ name = "example-stateful-precompile" version = "0.0.0" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "eyre", "parking_lot 0.12.3", "reth", @@ -3456,7 +3410,7 @@ checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", "auto_impl", - "bytes 1.7.1", + "bytes 1.7.2", ] [[package]] @@ -3485,6 +3439,18 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "filetime" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", +] + [[package]] name = "findshlibs" version = "0.10.2" @@ -3516,7 +3482,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -3549,6 +3515,15 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + [[package]] name = "funty" version = "2.0.0" @@ -3626,7 +3601,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3675,6 +3650,19 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "generator" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.12.4" @@ -3731,9 +3719,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -3804,13 +3792,13 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -3824,12 +3812,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -3903,12 +3891,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -4004,7 +3986,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -4015,7 +3997,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -4026,7 +4008,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "pin-project-lite", ] @@ -4037,7 +4019,7 @@ version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http 1.1.0", ] @@ -4047,7 +4029,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", @@ -4120,7 +4102,7 @@ version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-core", "futures-util", @@ -4144,7 +4126,7 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-util", "h2 0.4.6", @@ -4175,17 +4157,17 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.12", - "rustls-native-certs 0.7.2", + "rustls 0.23.13", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -4195,11 +4177,11 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-util", "http 1.1.0", @@ -4208,7 +4190,6 @@ dependencies = [ "pin-project-lite", "socket2 0.5.7", "tokio", - "tower", "tower-service", "tracing", ] @@ -4233,7 +4214,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4247,9 +4228,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4398,7 +4379,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4427,6 +4408,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "if-addrs" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78a89907582615b19f6f0da1af18abf6ff08be259395669b834b057a7ee92d8" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "impl-codec" version = "0.6.0" @@ -4485,10 +4476,11 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = 
"68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ + "arbitrary", "equivalent", "hashbrown 0.14.5", "serde", @@ -4507,7 +4499,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.4.0", + "indexmap 2.5.0", "is-terminal", "itoa", "log", @@ -4518,6 +4510,26 @@ dependencies = [ "str_stack", ] +[[package]] +name = "inotify" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "inout" version = "0.1.3" @@ -4554,9 +4566,9 @@ dependencies = [ [[package]] name = "intrusive-collections" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b694dc9f70c3bda874626d2aed13b780f137aab435f4e9814121955cf706122e" +checksum = "189d0897e4cbe8c75efedf3502c18c887b05046e59d28404d4d8e46cbc4d1e86" dependencies = [ "memoffset", ] @@ -4575,15 +4587,15 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "iri-string" -version = "0.7.2" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f5f6c2df22c009ac44f6f1499308e7a3ac7ba42cd2378475cc691510e1eef1b" +checksum = "9c25163201be6ded9e686703e85532f8f852ea1f92ba625cb3c51f7fe6d07a4a" dependencies = [ "memchr", "serde", @@ -4697,9 +4709,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ec465b607a36dc5dd45d48b7689bc83f679f66a3ac6b6b21cc787a11e0f8685" +checksum = "126b48a5acc3c52fbd5381a77898cb60e145123179588a29e7ac48f9c06e401b" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4715,9 +4727,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f0977f9c15694371b8024c35ab58ca043dbbf4b51ccb03db8858a021241df1" +checksum = "bf679a8e0e083c77997f7c4bb4ca826577105906027ae462aac70ff348d02c6a" dependencies = [ "base64 0.22.1", "futures-channel", @@ -4726,7 +4738,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "rustls-platform-verifier", "soketto", @@ -4740,12 +4752,12 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e942c55635fbf5dc421938b8558a8141c7e773720640f4f1dbe1f4164ca4e221" +checksum = "b0e503369a76e195b65af35058add0e6900b794a4e9a9316900ddd3a87a80477" dependencies = [ "async-trait", - "bytes 1.7.1", + "bytes 1.7.2", "futures-timer", "futures-util", "http 1.1.0", @@ -4767,47 +4779,47 @@ dependencies = [ [[package]] name = 
"jsonrpsee-http-client" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33774602df12b68a2310b38a535733c477ca4a498751739f89fe8dbbb62ec4c" +checksum = "f2c0caba4a6a8efbafeec9baa986aa22a75a96c29d3e4b0091b0098d6470efb5" dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.1", "hyper 1.4.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-platform-verifier", "serde", "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715" +checksum = "fc660a9389e2748e794a40673a4155d501f32db667757cdb80edeff0306b489b" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "jsonrpsee-server" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038fb697a709bec7134e9ccbdbecfea0e2d15183f7140254afef7c5610a3f488" +checksum = "af6e6c9b6d975edcb443565d648b605f3e85a04ec63aa6941811a8894cc9cded" dependencies = [ "futures-util", "http 1.1.0", @@ -4826,15 +4838,15 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower", + "tower 0.4.13", "tracing", ] [[package]] name = "jsonrpsee-types" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b67d6e008164f027afbc2e7bb79662650158d26df200040282d2aa1cbb093b" +checksum = "d8fb16314327cbc94fdf7965ef7e4422509cd5597f76d137bd104eb34aeede67" dependencies = [ "http 1.1.0", "serde", @@ -4844,9 +4856,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0470d0ae043ffcb0cd323797a631e637fb4b55fe3eaa6002934819458bba62a7" +checksum = "e0da62b43702bd5640ea305d35df95da30abc878e79a7b4b01feda3beaf35d3c" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4855,9 +4867,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.24.3" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "992bf67d1132f88edf4a4f8cff474cf01abb2be203004a2b8e11c2b20795b99e" +checksum = "39aabf5d6c6f22da8d5b808eea1fab0736059f11fb42f71f141b14f404e5046a" dependencies = [ "http 1.1.0", "jsonrpsee-client-transport", @@ -4883,9 +4895,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", @@ -4906,9 +4918,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4929,6 +4941,26 @@ version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" +[[package]] +name = "kqueue" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -4946,9 +4978,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" @@ -5004,6 +5036,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", + "redox_syscall 0.5.6", ] [[package]] @@ -5097,6 +5130,19 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru" version = "0.12.4" @@ -5153,9 +5199,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] @@ -5186,7 +5232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" dependencies = [ "base64 0.22.1", - "indexmap 2.4.0", + "indexmap 2.5.0", "metrics", "metrics-util", "quanta", @@ -5218,7 +5264,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.14.5", - "indexmap 2.4.0", + "indexmap 2.5.0", "metrics", "num_cpus", "ordered-float", @@ -5229,13 +5275,13 @@ dependencies = [ [[package]] name = "mev-share-sse" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00cdd87dab765e7dac55c21eb680bfd10655b6c2530f6fe578acdfbb66c757c" +checksum = "fc8342aaf4a3c2a1b2612bdf5cd1aa423918e0f1a0d9242aaeefbffd49457cad" dependencies = [ - "alloy-primitives 0.7.7", + "alloy-primitives", "async-sse", - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http-types", "pin-project-lite", @@ -5269,15 +5315,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = 
"miniz_oxide" version = "0.8.0" @@ -5335,7 +5372,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5367,9 +5404,9 @@ checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -5380,7 +5417,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.8.0", "url", ] @@ -5402,7 +5439,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -5435,6 +5472,24 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "notify" +version = "6.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" +dependencies = [ + "bitflags 2.6.0", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio 0.8.11", + "walkdir", + "windows-sys 0.48.0", +] + [[package]] name = "ntapi" version = "0.4.1" @@ -5513,7 +5568,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5595,7 +5650,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5623,9 +5678,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -5644,44 +5699,79 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.2.2" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0db6e3a9bbbcef7cef19d77aa2cc76d61377376e3bb86f89167e7e3f30ea023" +checksum = "21aad1fbf80d2bcd7406880efc7ba109365f44bbb72896758ddcbfa46bf1592c" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "alloy-serde", + "arbitrary", "derive_more 1.0.0", "serde", + "spin", ] [[package]] -name = "op-alloy-network" -version = "0.2.2" +name = "op-alloy-genesis" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66184e6c92269ba4ef1f80e8566ce11d41b584884ce7476d4b1b5e0e38503ecb" +checksum = "6e1b8a9b70da0e027242ec1762f0f3a386278b6291d00d12ff5a64929dc19f68" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_repr", +] + +[[package]] +name = "op-alloy-network" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "783ce4ebc0a994eee2188431511b16692b704e1e8fff0c77d8c0354d3c2b1fc8" +dependencies = [ + "alloy-consensus", "alloy-network", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types-eth", "op-alloy-consensus", "op-alloy-rpc-types", ] 
+[[package]]
+name = "op-alloy-protocol"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf300a82ae2d30e2255bfea87a2259da49f63a25a44db561ae64cc9e3084139f"
+dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
+ "alloy-primitives",
+ "alloy-rlp",
+ "alloy-serde",
+ "hashbrown 0.14.5",
+ "op-alloy-consensus",
+ "op-alloy-genesis",
+ "serde",
+]
+
[[package]]
name = "op-alloy-rpc-types"
-version = "0.2.2"
+version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9c604cd3b9680d0edd0b7127f3550bcff634c2d2efe27b2b4853e72320186a8"
+checksum = "e281fbfc2198b7c0c16457d6524f83d192662bc9f3df70f24c3038d4521616df"
dependencies = [
- "alloy-network",
- "alloy-primitives 0.8.0",
+ "alloy-eips",
+ "alloy-network-primitives",
+ "alloy-primitives",
 "alloy-rpc-types-eth",
 "alloy-serde",
+ "cfg-if",
+ "hashbrown 0.14.5",
 "op-alloy-consensus",
 "serde",
 "serde_json",
@@ -5689,16 +5779,34 @@ dependencies = [

[[package]]
name = "op-alloy-rpc-types-engine"
-version = "0.2.2"
+version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "620e645c36cc66220909bf97e6632e7a154a2309356221cbf33ae78bf5294478"
+checksum = "2947272a81ebf988f4804b6f0f6a7c0b2f6f89a908cb410e36f8f3828f81c778"
dependencies = [
- "alloy-primitives 0.8.0",
+ "alloy-eips",
+ "alloy-primitives",
 "alloy-rpc-types-engine",
 "alloy-serde",
+ "derive_more 1.0.0",
+ "op-alloy-consensus",
+ "op-alloy-genesis",
+ "op-alloy-protocol",
 "serde",
]

+[[package]]
+name = "op-reth"
+version = "1.0.4"
+dependencies = [
+ "clap",
+ "reth-cli-util",
+ "reth-node-builder",
+ "reth-node-optimism",
+ "reth-optimism-cli",
+ "reth-optimism-rpc",
+ "reth-provider",
+]
+
[[package]]
name = "opaque-debug"
version = "0.2.3"
@@ -5725,9 +5833,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"

[[package]]
name = "ordered-float"
-version = "4.2.2"
+version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6"
+checksum = "44d501f1a72f71d3c063a6bbc8f7271fa73aa09fe5d6283b6571e2ed176a2537"
dependencies = [
 "num-traits",
]
@@ -5775,7 +5883,7 @@ dependencies = [
 "arrayvec",
 "bitvec",
 "byte-slice-cast",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "impl-trait-for-tuples",
 "parity-scale-codec-derive",
 "serde",
@@ -5795,9 +5903,9 @@ dependencies = [

[[package]]
name = "parking"
-version = "2.2.0"
+version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
+checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba"

[[package]]
name = "parking_lot"
@@ -5842,7 +5950,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
 "cfg-if",
 "libc",
- "redox_syscall 0.5.3",
+ "redox_syscall 0.5.6",
 "smallvec",
 "windows-targets 0.52.6",
]
@@ -5908,28 +6016,15 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

[[package]]
name = "pest"
-version = "2.7.11"
+version = "2.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95"
+checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9"
dependencies = [
 "memchr",
 "thiserror",
 "ucd-trie",
]

-[[package]]
-name = "ph"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b7b74d575d7c11fb653fae69688be5206cafc1ead33c01ce61ac7f36eae45b"
-dependencies = [
- "binout",
- "bitm",
- "dyn_size_of",
- "rayon",
- "wyhash",
-]
-
[[package]]
name = "pharos"
version = "0.5.3"
@@ -5937,7 +6032,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414"
dependencies = [
 "futures",
- "rustc_version 0.4.0",
+ "rustc_version 0.4.1",
]

[[package]]
@@ -5970,7 +6065,7 @@ dependencies = [
 "phf_shared",
 "proc-macro2",
 "quote",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -5999,7 +6094,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -6026,9 +6121,9 @@ dependencies = [

[[package]]
name = "pkg-config"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
+checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"

[[package]]
name = "plain_hasher"
@@ -6041,9 +6136,9 @@ dependencies = [

[[package]]
name = "plotters"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
+checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
 "num-traits",
 "plotters-backend",
@@ -6054,15 +6149,15 @@ dependencies = [

[[package]]
name = "plotters-backend"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
+checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"

[[package]]
name = "plotters-svg"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
+checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
 "plotters-backend",
]
@@ -6087,9 +6182,9 @@ dependencies = [

[[package]]
name = "portable-atomic"
-version = "1.7.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265"
+checksum = "d30538d42559de6b034bc76fd6dd4c38961b1ee5c6c56e3808c50128fdbc22ce"

[[package]]
name = "powerfmt"
@@ -6154,6 +6249,16 @@ dependencies = [
 "termtree",
]

+[[package]]
+name = "pretty_assertions"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d"
+dependencies = [
+ "diff",
+ "yansi",
+]
+
[[package]]
name = "prettyplease"
version = "0.2.22"
@@ -6161,7 +6266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
dependencies = [
 "proc-macro2",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -6217,6 +6322,28 @@ dependencies = [
 "version_check",
]

+[[package]]
+name = "proc-macro-error-attr2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
+[[package]]
+name = "proc-macro-error2"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802"
+dependencies = [
+ "proc-macro-error-attr2",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+]
+
[[package]]
name = "proc-macro2"
version = "1.0.86"
@@ -6290,7 +6417,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -6309,7 +6436,7 @@ version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
dependencies = [
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "prost-derive 0.12.6",
]

@@ -6360,7 +6487,7 @@ dependencies = [
 "itertools 0.12.1",
 "proc-macro2",
 "quote",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -6423,16 +6550,16 @@ dependencies = [

[[package]]
name = "quinn"
-version = "0.11.3"
+version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156"
+checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684"
dependencies = [
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "pin-project-lite",
 "quinn-proto",
 "quinn-udp",
 "rustc-hash 2.0.0",
- "rustls 0.23.12",
+ "rustls 0.23.13",
 "socket2 0.5.7",
 "thiserror",
 "tokio",
@@ -6441,15 +6568,15 @@ dependencies = [

[[package]]
name = "quinn-proto"
-version = "0.11.6"
+version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd"
+checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6"
dependencies = [
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "rand 0.8.5",
 "ring",
 "rustc-hash 2.0.0",
- "rustls 0.23.12",
+ "rustls 0.23.13",
 "slab",
 "thiserror",
 "tinyvec",
@@ -6458,15 +6585,15 @@ dependencies = [

[[package]]
name = "quinn-udp"
-version = "0.5.4"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285"
+checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b"
dependencies = [
 "libc",
 "once_cell",
 "socket2 0.5.7",
 "tracing",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]

[[package]]
@@ -6516,6 +6643,7 @@ dependencies = [
 "libc",
 "rand_chacha 0.3.1",
 "rand_core 0.6.4",
+ "serde",
]

[[package]]
@@ -6641,9 +6769,9 @@ dependencies = [

[[package]]
name = "redox_syscall"
-version = "0.5.3"
+version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
+checksum = "355ae415ccd3a04315d3f8246e86d67689ea74d88d915576e1589a351062a13b"
dependencies = [
 "bitflags 2.6.0",
]
@@ -6705,9 +6833,9 @@ checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"

[[package]]
name = "regress"
-version = "0.10.0"
+version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16fe0a24af5daaae947294213d2fd2646fbf5e1fbacc1d4ba3e84b2393854842"
+checksum = "1541daf4e4ed43a0922b7969bdc2170178bcacc5dabf7e39bc508a9fa3953a7a"
dependencies = [
 "hashbrown 0.14.5",
 "memchr",
@@ -6720,7 +6848,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
dependencies = [
 "base64 0.21.7",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "encoding_rs",
 "futures-core",
 "futures-util",
@@ -6761,14 +6889,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63"
dependencies = [
 "base64 0.22.1",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "futures-core",
 "futures-util",
 "http 1.1.0",
 "http-body 1.0.1",
 "http-body-util",
 "hyper 1.4.1",
- "hyper-rustls 0.27.2",
+ "hyper-rustls 0.27.3",
 "hyper-util",
 "ipnet",
 "js-sys",
@@ -6778,8 +6906,8 @@ dependencies = [
 "percent-encoding",
 "pin-project-lite",
 "quinn",
- "rustls 0.23.12",
- "rustls-native-certs 0.7.2",
+ "rustls 0.23.13",
+ "rustls-native-certs 0.7.3",
 "rustls-pemfile 2.1.3",
 "rustls-pki-types",
 "serde",
@@ -6817,17 +6945,13 @@ dependencies = [
 "aquamarine",
 "backon",
 "clap",
- "discv5",
 "eyre",
- "fdlimit",
 "futures",
- "itertools 0.13.0",
- "libc",
- "metrics-process",
 "reth-basic-payload-builder",
 "reth-beacon-consensus",
 "reth-blockchain-tree",
 "reth-chainspec",
+ "reth-cli",
 "reth-cli-commands",
 "reth-cli-runner",
 "reth-cli-util",
@@ -6836,13 +6960,13 @@ dependencies = [
 "reth-consensus-common",
 "reth-db",
 "reth-db-api",
- "reth-db-common",
 "reth-discv4",
 "reth-downloaders",
 "reth-engine-util",
 "reth-errors",
 "reth-ethereum-payload-builder",
 "reth-evm",
+ "reth-evm-bsc",
 "reth-execution-types",
 "reth-exex",
 "reth-fs-util",
@@ -6857,9 +6981,6 @@ dependencies = [
 "reth-node-events",
 "reth-node-metrics",
 "reth-node-optimism",
- "reth-optimism-cli",
- "reth-optimism-primitives",
- "reth-optimism-rpc",
 "reth-payload-builder",
 "reth-payload-primitives",
 "reth-payload-validator",
@@ -6875,21 +6996,16 @@ dependencies = [
 "reth-rpc-types",
 "reth-rpc-types-compat",
 "reth-stages",
- "reth-stages-api",
 "reth-static-file",
- "reth-static-file-types",
 "reth-tasks",
 "reth-tracing",
 "reth-transaction-pool",
 "reth-trie",
 "reth-trie-db",
- "serde",
 "serde_json",
 "similar-asserts",
 "tempfile",
- "tikv-jemallocator",
 "tokio",
- "toml 0.8.19",
 "tracing",
]

@@ -6897,6 +7013,7 @@ name = "reth-auto-seal-consensus"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "futures-util",
 "reth-beacon-consensus",
 "reth-chainspec",
@@ -6946,7 +7063,8 @@ dependencies = [
name = "reth-beacon-consensus"
version = "1.0.4"
dependencies = [
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "assert_matches",
 "futures",
 "itertools 0.13.0",
@@ -6969,6 +7087,7 @@ dependencies = [
 "reth-exex-types",
 "reth-metrics",
 "reth-network-p2p",
+ "reth-node-types",
 "reth-payload-builder",
 "reth-payload-primitives",
 "reth-payload-validator",
@@ -6996,40 +7115,34 @@ dependencies = [
name = "reth-bench"
version = "1.0.4"
dependencies = [
- "alloy-consensus",
 "alloy-eips",
 "alloy-json-rpc",
 "alloy-provider",
- "alloy-pubsub 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-pubsub 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "alloy-rpc-client",
 "alloy-rpc-types-engine",
 "alloy-transport",
 "alloy-transport-http",
 "alloy-transport-ipc",
- "alloy-transport-ws 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-transport-ws 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "async-trait",
 "clap",
 "csv",
 "eyre",
 "futures",
- "libc",
 "reqwest 0.12.7",
 "reth-cli-runner",
- "reth-db",
+ "reth-cli-util",
 "reth-node-api",
 "reth-node-core",
 "reth-primitives",
- "reth-provider",
 "reth-rpc-types",
 "reth-rpc-types-compat",
 "reth-tracing",
 "serde",
- "serde_json",
 "thiserror",
- "tikv-jemallocator",
 "tokio",
- "tokio-util",
- "tower",
+ "tower 0.4.13",
 "tracing",
]

@@ -7037,7 +7150,8 @@ name = "reth-blockchain-tree"
version = "1.0.4"
dependencies = [
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "aquamarine",
 "assert_matches",
 "linked_hash_set",
@@ -7054,9 +7168,9 @@ dependencies = [
 "reth-execution-types",
 "reth-metrics",
 "reth-network",
+ "reth-node-types",
 "reth-primitives",
 "reth-provider",
- "reth-prune-types",
 "reth-revm",
 "reth-stages-api",
 "reth-storage-errors",
@@ -7073,6 +7187,7 @@ dependencies = [
name = "reth-blockchain-tree-api"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "reth-consensus",
 "reth-execution-errors",
 "reth-primitives",
@@ -7085,8 +7200,8 @@ name = "reth-bsc-chainspec"
version = "1.0.4"
dependencies = [
 "alloy-chains",
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "alloy-primitives 0.8.0",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "derive_more 1.0.0",
 "once_cell",
 "op-alloy-rpc-types",
@@ -7116,7 +7231,7 @@ dependencies = [
 "alloy-rlp",
 "bitset",
 "blst",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "futures-util",
 "lazy_static",
 "lru",
@@ -7155,7 +7270,7 @@ dependencies = [
 "alloy-rlp",
 "bitset",
 "blst",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "futures-util",
 "lazy_static",
 "lru",
@@ -7195,6 +7310,7 @@ dependencies = [
name = "reth-chain-state"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "alloy-signer",
 "alloy-signer-local",
 "auto_impl",
@@ -7222,8 +7338,8 @@ version = "1.0.4"
dependencies = [
 "alloy-chains",
 "alloy-eips",
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "alloy-primitives 0.8.0",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "alloy-rlp",
 "alloy-trie",
 "auto_impl",
@@ -7252,6 +7368,8 @@ name = "reth-cli-commands"
version = "1.0.4"
dependencies = [
 "ahash",
+ "alloy-eips",
+ "alloy-primitives",
 "arbitrary",
 "backon",
 "clap",
@@ -7267,6 +7385,7 @@ dependencies = [
 "ratatui",
 "reth-beacon-consensus",
 "reth-chainspec",
+ "reth-cli",
 "reth-cli-runner",
 "reth-cli-util",
 "reth-config",
@@ -7318,13 +7437,16 @@ name = "reth-cli-util"
version = "1.0.4"
dependencies = [
 "alloy-eips",
- "alloy-primitives 0.8.0",
+ "alloy-primitives",
+ "cfg-if",
 "eyre",
 "libc",
 "rand 0.8.5",
 "reth-fs-util",
 "secp256k1",
 "thiserror",
+ "tikv-jemallocator",
+ "tracy-client",
]

[[package]]
@@ -7333,12 +7455,13 @@ version = "1.0.4"
dependencies = [
 "alloy-consensus",
 "alloy-eips",
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "alloy-primitives 0.8.0",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "alloy-trie",
 "arbitrary",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "modular-bitfield",
+ "op-alloy-consensus",
 "proptest",
 "proptest-arbitrary-interop",
 "reth-codecs-derive",
@@ -7351,11 +7474,11 @@ dependencies = [
name = "reth-codecs-derive"
version = "1.0.4"
dependencies = [
- "convert_case 0.6.0",
+ "convert_case",
 "proc-macro2",
 "quote",
 "similar-asserts",
- "syn 2.0.76",
+ "syn 2.0.77",
]

[[package]]
@@ -7378,6 +7501,7 @@ dependencies = [
name = "reth-consensus"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "auto_impl",
 "derive_more 1.0.0",
 "reth-primitives",
]

@@ -7387,6 +7511,7 @@ dependencies = [
name = "reth-consensus-common"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "mockall",
 "rand 0.8.5",
 "reth-chainspec",
@@ -7421,9 +7546,10 @@ dependencies = [
name = "reth-db"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "arbitrary",
 "assert_matches",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "criterion",
 "derive_more 1.0.0",
 "eyre",
@@ -7460,16 +7586,13 @@ dependencies = [
name = "reth-db-api"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "arbitrary",
- "assert_matches",
- "bytes 1.7.1",
- "criterion",
+ "bytes 1.7.2",
 "derive_more 1.0.0",
- "iai-callgrind",
 "metrics",
 "modular-bitfield",
 "parity-scale-codec",
- "pprof",
 "proptest",
 "proptest-arbitrary-interop",
 "rand 0.8.5",
@@ -7490,7 +7613,8 @@ dependencies = [
name = "reth-db-common"
version = "1.0.4"
dependencies = [
- "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "alloy-primitives",
 "boyer-moore-magiclen",
 "eyre",
 "reth-chainspec",
@@ -7500,6 +7624,7 @@ dependencies = [
 "reth-db-api",
 "reth-etl",
 "reth-fs-util",
+ "reth-node-types",
 "reth-primitives",
 "reth-primitives-traits",
 "reth-provider",
@@ -7516,8 +7641,9 @@ dependencies = [
name = "reth-db-models"
version = "1.0.4"
dependencies = [
+ "alloy-primitives",
 "arbitrary",
- "bytes 1.7.1",
+ "bytes 1.7.2",
 "modular-bitfield",
 "proptest",
 "proptest-arbitrary-interop",
@@ -7531,7 +7657,7 @@ dependencies = [
name = "reth-discv4"
version = "1.0.4"
dependencies = [
- "alloy-primitives 0.8.0",
+ "alloy-primitives",
 "alloy-rlp",
 "assert_matches",
 "discv5",
@@ -7557,7 +7683,7 @@ dependencies = [
name = "reth-discv5"
version = "1.0.4"
dependencies = [
- "alloy-primitives 0.8.0",
+ "alloy-primitives",
 "alloy-rlp",
 "derive_more 1.0.0",
 "discv5",
@@ -7582,7 +7708,7 @@ name = "reth-dns-discovery"
version = "1.0.4"
dependencies = [
 "alloy-chains",
- "alloy-primitives 0.8.0",
+ "alloy-primitives",
 "alloy-rlp",
 "data-encoding",
 "enr",
@@ -7646,6 +7772,7 @@ version = "1.0.4"
dependencies = [
 "alloy-consensus",
 "alloy-network",
+ "alloy-primitives",
 "alloy-rpc-types",
 "alloy-signer",
 "alloy-signer-local",
@@ -7680,7 +7807,7 @@ name = "reth-ecies"
version = "1.0.4"
dependencies = [
 "aes",
- "alloy-primitives 0.8.0",
+ "alloy-primitives",
 "alloy-rlp",
 "block-padding 0.3.3",
 "byteorder",
@@ -7710,7 +7837,10 @@ name = "reth-engine-primitives"
version = "1.0.4"
dependencies = [
 "reth-chainspec",
+ "reth-execution-types",
 "reth-payload-primitives",
+ "reth-primitives",
+ "reth-trie",
 "serde",
]

@@ -7721,23 +7851,20 @@ dependencies = [
 "futures",
 "pin-project",
 "reth-beacon-consensus",
- "reth-blockchain-tree",
 "reth-chainspec",
 "reth-consensus",
- "reth-db-api",
- "reth-engine-primitives",
 "reth-engine-tree",
 "reth-ethereum-engine-primitives",
 "reth-evm",
 "reth-evm-ethereum",
 "reth-exex-types",
 "reth-network-p2p",
+ "reth-node-types",
 "reth-payload-builder",
 "reth-payload-validator",
 "reth-primitives",
 "reth-provider",
 "reth-prune",
- "reth-prune-types",
 "reth-stages-api",
 "reth-tasks",
 "thiserror",
@@ -7761,7 +7888,6 @@ dependencies = [
 "reth-chainspec",
 "reth-consensus",
 "reth-db",
- "reth-db-api",
 "reth-engine-primitives",
 "reth-errors",
 "reth-ethereum-engine-primitives",
@@ -7769,6 +7895,7 @@ dependencies = [
 "reth-exex-types",
 "reth-metrics",
 "reth-network-p2p",
+ "reth-node-types",
 "reth-payload-builder",
 "reth-payload-primitives",
 "reth-payload-validator",
@@ -7785,6 +7912,7 @@ dependencies = [
 "reth-tasks",
 "reth-tracing",
 "reth-trie",
+ "reth-trie-parallel",
"thiserror", "tokio", "tracing", @@ -7838,7 +7966,7 @@ dependencies = [ "alloy-rlp", "arbitrary", "async-stream", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more 1.0.0", "futures", "pin-project", @@ -7869,10 +7997,10 @@ name = "reth-eth-wire-types" version = "1.0.4" dependencies = [ "alloy-chains", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-rlp", "arbitrary", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more 1.0.0", "proptest", "proptest-arbitrary-interop", @@ -7888,8 +8016,7 @@ dependencies = [ name = "reth-ethereum-cli" version = "1.0.4" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "clap", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "eyre", "reth-chainspec", "reth-cli", @@ -7913,14 +8040,13 @@ name = "reth-ethereum-engine-primitives" version = "1.0.4" dependencies = [ "alloy-rlp", + "reth-chain-state", "reth-chainspec", "reth-engine-primitives", - "reth-evm-ethereum", "reth-payload-primitives", "reth-primitives", "reth-rpc-types", "reth-rpc-types-compat", - "revm-primitives", "serde", "serde_json", "sha2 0.10.8", @@ -7931,7 +8057,7 @@ name = "reth-ethereum-forks" version = "1.0.4" dependencies = [ "alloy-chains", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "arbitrary", "auto_impl", @@ -7950,11 +8076,13 @@ name = "reth-ethereum-payload-builder" version = "1.0.4" dependencies = [ "reth-basic-payload-builder", + "reth-chain-state", "reth-errors", "reth-evm", "reth-evm-ethereum", "reth-execution-types", "reth-payload-builder", + "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", @@ -7968,7 +8096,7 @@ dependencies = [ name = "reth-etl" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "rayon", "reth-db-api", "tempfile", @@ -7981,11 +8109,14 @@ dependencies = [ "alloy-eips", "auto_impl", "futures-util", + "metrics", "parking_lot 0.12.3", "reth-chainspec", "reth-execution-errors", "reth-execution-types", + "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-storage-errors", "revm", @@ -8043,6 +8174,7 @@ dependencies = [ name = "reth-evm-optimism" version = "1.0.4" dependencies = [ + "alloy-primitives", "reth-chainspec", "reth-ethereum-forks", "reth-evm", @@ -8065,7 +8197,7 @@ name = "reth-execution-errors" version = "1.0.4" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "derive_more 1.0.0", "nybbles", @@ -8080,7 +8212,7 @@ name = "reth-execution-types" version = "1.0.4" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "reth-chainspec", "reth-execution-errors", "reth-primitives", @@ -8156,7 +8288,8 @@ dependencies = [ name = "reth-exex-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", + "reth-primitives", "reth-provider", "serde", ] @@ -8170,12 +8303,34 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-invalid-block-hooks" +version = "1.0.4" +dependencies = [ + "alloy-rlp", + "alloy-rpc-types-debug", + "eyre", + "futures", + "jsonrpsee", + "pretty_assertions", + "reth-chainspec", + "reth-engine-primitives", + "reth-evm", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc-api", + "reth-tracing", + "reth-trie", + "serde_json", +] + [[package]] name = "reth-ipc" version = "1.0.4" dependencies = [ "async-trait", - "bytes 1.7.1", 
+ "bytes 1.7.2", "futures", "futures-util", "interprocess", @@ -8188,7 +8343,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower", + "tower 0.4.13", "tracing", ] @@ -8199,9 +8354,9 @@ dependencies = [ "bitflags 2.6.0", "byteorder", "criterion", - "dashmap 6.0.1", + "dashmap 6.1.0", "derive_more 1.0.0", - "indexmap 2.4.0", + "indexmap 2.5.0", "parking_lot 0.12.3", "pprof", "rand 0.8.5", @@ -8240,7 +8395,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.76", + "syn 2.0.77", "trybuild", ] @@ -8248,7 +8403,7 @@ dependencies = [ name = "reth-net-banlist" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", ] [[package]] @@ -8256,6 +8411,7 @@ name = "reth-net-nat" version = "1.0.4" dependencies = [ "futures-util", + "if-addrs", "reqwest 0.12.7", "reth-tracing", "serde_with", @@ -8324,8 +8480,8 @@ dependencies = [ name = "reth-network-api" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", - "alloy-rpc-types-admin 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", + "alloy-rpc-types-admin 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "auto_impl", "derive_more 1.0.0", "enr", @@ -8364,7 +8520,7 @@ dependencies = [ name = "reth-network-peers" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "enr", "rand 0.8.5", @@ -8395,15 +8551,12 @@ version = "1.0.4" dependencies = [ "anyhow", "bincode", - "cuckoofilter", "derive_more 1.0.0", "lz4_flex", "memmap2", - "ph", "rand 0.8.5", "reth-fs-util", "serde", - "sucds", "tempfile", "thiserror", "tracing", @@ -8414,13 +8567,13 @@ dependencies = [ name = "reth-node-api" version = "1.0.4" dependencies = [ - "reth-chainspec", - "reth-db-api", "reth-engine-primitives", "reth-evm", "reth-network-api", + "reth-node-types", "reth-payload-builder", "reth-payload-primitives", + "reth-primitives", "reth-provider", "reth-rpc-eth-api", "reth-tasks", @@ -8464,10 +8617,12 @@ name = "reth-node-builder" version = "1.0.4" dependencies = [ "alloy-network", + "alloy-primitives", "aquamarine", "eyre", "fdlimit", "futures", + "jsonrpsee", "rayon", "reth-auto-seal-consensus", "reth-beacon-consensus", @@ -8488,6 +8643,8 @@ dependencies = [ "reth-engine-util", "reth-evm", "reth-exex", + "reth-fs-util", + "reth-invalid-block-hooks", "reth-network", "reth-network-api", "reth-network-p2p", @@ -8501,6 +8658,7 @@ dependencies = [ "reth-provider", "reth-prune", "reth-rpc", + "reth-rpc-api", "reth-rpc-builder", "reth-rpc-engine-api", "reth-rpc-eth-types", @@ -8523,7 +8681,8 @@ dependencies = [ name = "reth-node-core" version = "1.0.4" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "alloy-rpc-types-engine", "clap", "const_format", @@ -8536,11 +8695,11 @@ dependencies = [ "rand 0.8.5", "reth-bsc-chainspec", "reth-chainspec", + "reth-cli", "reth-cli-util", "reth-config", "reth-consensus-common", "reth-db", - "reth-db-api", "reth-discv4", "reth-discv5", "reth-fs-util", @@ -8550,7 +8709,6 @@ dependencies = [ "reth-network-peers", "reth-optimism-chainspec", "reth-primitives", - "reth-provider", "reth-prune-types", "reth-rpc-api", "reth-rpc-eth-api", @@ -8559,6 +8717,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-stages-types", + "reth-storage-api", "reth-storage-errors", "reth-tracing", "reth-transaction-pool", @@ -8566,7 +8725,9 @@ dependencies = 
[ "serde", "serde_json", "shellexpand", + "strum", "tempfile", + "thiserror", "tokio", "toml 0.8.19", "tracing", @@ -8577,8 +8738,8 @@ dependencies = [ name = "reth-node-ethereum" version = "1.0.4" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "eyre", "futures", "futures-util", @@ -8599,6 +8760,7 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-payload-builder", + "reth-primitives", "reth-provider", "reth-rpc", "reth-tasks", @@ -8612,6 +8774,7 @@ dependencies = [ name = "reth-node-events" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rpc-types-engine", "futures", "humantime", @@ -8643,7 +8806,6 @@ dependencies = [ "procfs", "reqwest 0.12.7", "reth-chainspec", - "reth-db", "reth-db-api", "reth-metrics", "reth-provider", @@ -8651,7 +8813,7 @@ dependencies = [ "socket2 0.4.10", "tikv-jemalloc-ctl", "tokio", - "tower", + "tower 0.4.13", "tracing", "vergen", ] @@ -8660,8 +8822,8 @@ dependencies = [ name = "reth-node-optimism" version = "1.0.4" dependencies = [ - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "async-trait", "clap", "eyre", @@ -8705,13 +8867,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-node-types" +version = "1.0.4" +dependencies = [ + "reth-chainspec", + "reth-db-api", + "reth-engine-primitives", +] + [[package]] name = "reth-optimism-chainspec" version = "1.0.4" dependencies = [ "alloy-chains", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "derive_more 1.0.0", "once_cell", "op-alloy-rpc-types", @@ -8725,7 +8896,7 @@ dependencies = [ name = "reth-optimism-cli" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "clap", "eyre", @@ -8733,6 +8904,7 @@ dependencies = [ "reth-chainspec", "reth-cli", "reth-cli-commands", + "reth-cli-runner", "reth-config", "reth-consensus", "reth-db", @@ -8743,8 +8915,10 @@ dependencies = [ "reth-evm-optimism", "reth-execution-types", "reth-network-p2p", + "reth-node-builder", "reth-node-core", "reth-node-events", + "reth-node-optimism", "reth-optimism-chainspec", "reth-optimism-primitives", "reth-primitives", @@ -8754,6 +8928,7 @@ dependencies = [ "reth-stages-types", "reth-static-file", "reth-static-file-types", + "reth-tracing", "tempfile", "tokio", "tokio-util", @@ -8764,10 +8939,13 @@ dependencies = [ name = "reth-optimism-consensus" version = "1.0.4" dependencies = [ + "alloy-primitives", "reth-chainspec", "reth-consensus", "reth-consensus-common", + "reth-optimism-chainspec", "reth-primitives", + "reth-trie-common", "tracing", ] @@ -8775,6 +8953,7 @@ dependencies = [ name = "reth-optimism-payload-builder" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "reth-basic-payload-builder", "reth-chain-state", @@ -8800,14 +8979,21 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" version = "1.0.4" +dependencies = [ + "alloy-primitives", + "reth-primitives", + "reth-primitives-traits", +] [[package]] name = "reth-optimism-rpc" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", 
"jsonrpsee-types", + "op-alloy-consensus", "op-alloy-network", + "op-alloy-rpc-types", "parking_lot 0.12.3", "reqwest 0.12.7", "reth-chainspec", @@ -8816,6 +9002,7 @@ dependencies = [ "reth-network-api", "reth-node-api", "reth-node-builder", + "reth-optimism-chainspec", "reth-primitives", "reth-provider", "reth-rpc", @@ -8886,14 +9073,14 @@ dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "alloy-rlp", "alloy-rpc-types", "alloy-serde", "arbitrary", "assert_matches", - "bytes 1.7.1", + "bytes 1.7.2", "c-kzg", "criterion", "derive_more 1.0.0", @@ -8902,6 +9089,7 @@ dependencies = [ "lazy_static", "modular-bitfield", "once_cell", + "op-alloy-consensus", "op-alloy-rpc-types", "pprof", "proptest", @@ -8921,7 +9109,6 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "tempfile", "test-fuzz", "thiserror", "zstd", @@ -8933,13 +9120,13 @@ version = "1.0.4" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "alloy-rlp", "alloy-rpc-types-eth", "arbitrary", "byteorder", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more 1.0.0", "modular-bitfield", "proptest", @@ -8957,13 +9144,15 @@ dependencies = [ name = "reth-provider" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rpc-types-engine", "assert_matches", "auto_impl", - "dashmap 6.0.1", + "dashmap 6.1.0", "eyre", "itertools 0.13.0", "metrics", + "notify", "once_cell", "parking_lot 0.12.3", "rand 0.8.5", @@ -8975,12 +9164,14 @@ dependencies = [ "reth-db", "reth-db-api", "reth-errors", + "reth-ethereum-engine-primitives", "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", "reth-network-p2p", "reth-nippy-jar", + "reth-node-types", "reth-primitives", "reth-prune-types", "reth-stages-types", @@ -9000,7 +9191,7 @@ dependencies = [ name = "reth-prune" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "assert_matches", "itertools 0.13.0", "metrics", @@ -9029,10 +9220,10 @@ dependencies = [ name = "reth-prune-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "arbitrary", "assert_matches", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more 1.0.0", "modular-bitfield", "proptest", @@ -9066,9 +9257,10 @@ name = "reth-rpc" version = "1.0.4" dependencies = [ "alloy-dyn-abi", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-eips", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-network", - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rlp", "async-trait", "cfg-if", @@ -9115,7 +9307,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tracing", "tracing-futures", ] @@ -9124,7 +9316,9 @@ dependencies = [ name = "reth-rpc-api" version = "1.0.4" dependencies = [ + "alloy-eips", "alloy-json-rpc", + "alloy-primitives", "jsonrpsee", "reth-engine-primitives", "reth-network-peers", @@ -9138,6 +9332,7 @@ dependencies = [ name = "reth-rpc-api-testing-util" version = "1.0.4" dependencies = [ + "alloy-primitives", "futures", "jsonrpsee", "jsonrpsee-http-client", @@ -9154,6 +9349,8 @@ dependencies = [ name = 
"reth-rpc-builder" version = "1.0.4" dependencies = [ + "alloy-network", + "alloy-primitives", "clap", "http 1.1.0", "jsonrpsee", @@ -9191,7 +9388,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tower-http", "tracing", ] @@ -9200,6 +9397,8 @@ dependencies = [ name = "reth-rpc-engine-api" version = "1.0.4" dependencies = [ + "alloy-eips", + "alloy-primitives", "alloy-rlp", "assert_matches", "async-trait", @@ -9223,6 +9422,7 @@ dependencies = [ "reth-tasks", "reth-testing-utils", "reth-tokio-util", + "reth-transaction-pool", "serde", "thiserror", "tokio", @@ -9236,6 +9436,7 @@ dependencies = [ "alloy-dyn-abi", "alloy-json-rpc", "alloy-network", + "alloy-primitives", "async-trait", "auto_impl", "cfg-if", @@ -9270,6 +9471,8 @@ dependencies = [ name = "reth-rpc-eth-types" version = "1.0.4" dependencies = [ + "alloy-consensus", + "alloy-primitives", "alloy-sol-types", "derive_more 1.0.0", "futures", @@ -9315,7 +9518,7 @@ dependencies = [ "pin-project", "reqwest 0.12.7", "tokio", - "tower", + "tower 0.4.13", "tracing", ] @@ -9323,7 +9526,7 @@ dependencies = [ name = "reth-rpc-server-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", @@ -9338,11 +9541,12 @@ dependencies = [ name = "reth-rpc-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "alloy-rpc-types", - "alloy-rpc-types-admin 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-rpc-types-admin 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", "alloy-rpc-types-mev", "alloy-rpc-types-trace", @@ -9359,6 +9563,7 @@ dependencies = [ name = "reth-rpc-types-compat" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "alloy-rpc-types", "reth-primitives", @@ -9420,7 +9625,7 @@ dependencies = [ name = "reth-stages-api" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "aquamarine", "assert_matches", "auto_impl", @@ -9431,6 +9636,7 @@ dependencies = [ "reth-errors", "reth-metrics", "reth-network-p2p", + "reth-node-types", "reth-primitives-traits", "reth-provider", "reth-prune", @@ -9449,9 +9655,9 @@ dependencies = [ name = "reth-stages-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "arbitrary", - "bytes 1.7.1", + "bytes 1.7.2", "modular-bitfield", "proptest", "proptest-arbitrary-interop", @@ -9466,13 +9672,15 @@ dependencies = [ name = "reth-static-file" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "assert_matches", "parking_lot 0.12.3", "rayon", + "reth-chainspec", "reth-db", "reth-db-api", "reth-nippy-jar", + "reth-node-types", "reth-provider", "reth-prune-types", "reth-stages", @@ -9489,7 +9697,7 @@ dependencies = [ name = "reth-static-file-types" version = "1.0.4" dependencies = [ - "alloy-primitives 0.8.0", + "alloy-primitives", "clap", "derive_more 1.0.0", "serde", @@ -9500,8 +9708,10 @@ dependencies = [ name = "reth-storage-api" version = "1.0.4" dependencies = [ + "alloy-primitives", "auto_impl", "reth-chainspec", + "reth-db-api", "reth-db-models", "reth-execution-types", "reth-primitives", @@ -9515,6 +9725,7 @@ dependencies = [ name = "reth-storage-errors" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "derive_more 1.0.0", "reth-fs-util", @@ -9543,7 +9754,8 @@ name = 
"reth-testing-utils" version = "1.0.4" dependencies = [ "alloy-eips", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "rand 0.8.5", "reth-primitives", "secp256k1", @@ -9576,6 +9788,8 @@ dependencies = [ name = "reth-transaction-pool" version = "1.0.4" dependencies = [ + "alloy-eips", + "alloy-primitives", "alloy-rlp", "aquamarine", "assert_matches", @@ -9618,6 +9832,7 @@ dependencies = [ name = "reth-trie" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "auto_impl", "criterion", @@ -9631,16 +9846,13 @@ dependencies = [ "reth-execution-errors", "reth-metrics", "reth-primitives", - "reth-provider", "reth-stages-types", "reth-storage-errors", "reth-trie-common", "revm", "serde", "serde_json", - "similar-asserts", "tokio", - "tokio-stream", "tracing", "triehash", ] @@ -9650,12 +9862,12 @@ name = "reth-trie-common" version = "1.0.4" dependencies = [ "alloy-consensus", - "alloy-genesis 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "alloy-primitives 0.8.0", + "alloy-genesis 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "alloy-primitives", "alloy-rlp", "alloy-trie", "arbitrary", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more 1.0.0", "hash-db", "itertools 0.13.0", @@ -9667,17 +9879,15 @@ dependencies = [ "reth-primitives-traits", "revm-primitives", "serde", - "test-fuzz", - "toml 0.8.19", ] [[package]] name = "reth-trie-db" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "auto_impl", - "criterion", "derive_more 1.0.0", "itertools 0.13.0", "metrics", @@ -9689,6 +9899,7 @@ dependencies = [ "reth-db-api", "reth-execution-errors", "reth-metrics", + "reth-node-types", "reth-primitives", "reth-provider", "reth-stages-types", @@ -9709,6 +9920,7 @@ dependencies = [ name = "reth-trie-parallel" version = "1.0.4" dependencies = [ + "alloy-primitives", "alloy-rlp", "criterion", "derive_more 1.0.0", @@ -9759,8 +9971,8 @@ dependencies = [ [[package]] name = "revm" -version = "14.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=b8318b3db1c3499c580429dca5961bfa8693c690#b8318b3db1c3499c580429dca5961bfa8693c690" +version = "14.0.2" +source = "git+https://github.com/bnb-chain/revm?rev=fbc92f58052227c06f8a2a4e25227d111ed8e08e#fbc92f58052227c06f8a2a4e25227d111ed8e08e" dependencies = [ "auto_impl", "cfg-if", @@ -9773,12 +9985,13 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.6.0" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48184032103bb23788e42e42c7c85207f5b0b8a248b09ea8f5233077f35ab56e" +checksum = "cd8e3bae0d5c824da0ac883e2521c5e83870d6521eeeccd4ee54266aa3cc1a51" dependencies = [ - "alloy-primitives 0.8.0", - "alloy-rpc-types", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", "boa_engine", @@ -9791,8 +10004,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "10.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=b8318b3db1c3499c580429dca5961bfa8693c690#b8318b3db1c3499c580429dca5961bfa8693c690" +version = "10.0.2" +source = "git+https://github.com/bnb-chain/revm?rev=fbc92f58052227c06f8a2a4e25227d111ed8e08e#fbc92f58052227c06f8a2a4e25227d111ed8e08e" dependencies = [ "revm-primitives", "serde", @@ -9800,8 +10013,8 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "11.0.0" -source = 
"git+https://github.com/bnb-chain/revm?rev=b8318b3db1c3499c580429dca5961bfa8693c690#b8318b3db1c3499c580429dca5961bfa8693c690" +version = "11.0.2" +source = "git+https://github.com/bnb-chain/revm?rev=fbc92f58052227c06f8a2a4e25227d111ed8e08e#fbc92f58052227c06f8a2a4e25227d111ed8e08e" dependencies = [ "alloy-rlp", "aurora-engine-modexp", @@ -9828,11 +10041,11 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "9.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=b8318b3db1c3499c580429dca5961bfa8693c690#b8318b3db1c3499c580429dca5961bfa8693c690" +version = "9.0.2" +source = "git+https://github.com/bnb-chain/revm?rev=fbc92f58052227c06f8a2a4e25227d111ed8e08e#fbc92f58052227c06f8a2a4e25227d111ed8e08e" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.0", + "alloy-primitives", "auto_impl", "bitflags 2.6.0", "bitvec", @@ -9857,9 +10070,9 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.48" +version = "0.8.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f86ae463694029097b846d8f99fd5536740602ae00022c0c50c5600720b2f71" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" dependencies = [ "bytemuck", ] @@ -9918,9 +10131,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ "libc", ] @@ -9931,7 +10144,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "rustc-hex", ] @@ -9970,7 +10183,7 @@ dependencies = [ "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", - "bytes 1.7.1", + "bytes 1.7.2", "fastrlp", "num-bigint", "num-traits", @@ -10008,6 +10221,9 @@ name = "rustc-hash" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +dependencies = [ + "rand 0.8.5", +] [[package]] name = "rustc-hex" @@ -10026,18 +10242,18 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.23", ] [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -10060,15 +10276,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -10087,9 +10303,22 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.2" +version = 
"0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04182dffc9091a404e0fc069ea5cd60e5b866c3adf881eff99a32d048242dffa" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.3", @@ -10134,10 +10363,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.12", - "rustls-native-certs 0.7.2", + "rustls 0.23.13", + "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots", @@ -10162,9 +10391,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -10212,20 +10441,20 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.16" +version = "2.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeb7ac86243095b70a7920639507b71d51a63390d1ba26c4f60a552fbb914a37" +checksum = "0c947adb109a8afce5fc9c7bf951f87f146e9147b3a6a58413105628fb1d1e66" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -10239,6 +10468,12 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -10257,9 +10492,9 @@ dependencies = [ [[package]] name = "sdd" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0495e4577c672de8254beb68d01a9b62d0e8a13c099edecdbedccce3223cd29f" +checksum = "60a7b59a5d9b0099720b417b6325d91a52cbf5b3dcb5041d864be53eefa58abc" [[package]] name = "sec1" @@ -10277,9 +10512,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ "rand 0.8.5", "secp256k1-sys", @@ -10288,9 +10523,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] @@ -10311,9 +10546,9 @@ dependencies = [ [[package]] name = 
"security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -10360,9 +10595,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -10388,22 +10623,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "itoa", "memchr", "ryu", @@ -10429,14 +10664,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -10463,7 +10698,7 @@ dependencies = [ "chrono", "hex 0.4.3", "indexmap 1.9.3", - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_derive", "serde_json", @@ -10480,7 +10715,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -10505,7 +10740,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -10580,9 +10815,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -10670,9 +10905,9 @@ dependencies = [ [[package]] name = "similar-asserts" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" +checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e" dependencies = [ "console", "similar", @@ -10754,7 +10989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", - "bytes 1.7.1", + "bytes 1.7.2", "futures", "http 1.1.0", "httparse", @@ 
-10768,6 +11003,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -10792,7 +11030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" dependencies = [ "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -10834,11 +11072,11 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -10875,21 +11113,11 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" -[[package]] -name = "sucds" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53d46182afe6ed822a94c54a532dc0d59691a8f49226bdc4596529ca864cdd6" -dependencies = [ - "anyhow", - "num-traits", -] - [[package]] name = "symbolic-common" -version = "12.10.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16629323a4ec5268ad23a575110a724ad4544aae623451de600c747bf87b36cf" +checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" dependencies = [ "debugid", "memmap2", @@ -10899,9 +11127,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.10.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c043a45f08f41187414592b3ceb53fb0687da57209cc77401767fb69d5b596" +checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10921,9 +11149,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.76" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -10932,14 +11160,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284c41c2919303438fcf8dede4036fd1e82d4fc0fbb2b279bd2a1442c909ca92" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -10977,7 +11205,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -11143,7 +11371,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -11167,22 +11395,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = 
"thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -11226,9 +11454,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" +checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b" dependencies = [ "libc", "paste", @@ -11237,9 +11465,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" +version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" +checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" dependencies = [ "cc", "libc", @@ -11247,9 +11475,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" +checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -11335,12 +11563,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", - "bytes 1.7.1", + "bytes 1.7.2", "libc", "mio 1.0.2", "parking_lot 0.12.3", @@ -11359,7 +11587,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -11378,16 +11606,16 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -11403,7 +11631,7 @@ checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -11413,11 +11641,11 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-io", "futures-sink", @@ 
-11458,11 +11686,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", @@ -11490,6 +11718,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -11499,7 +11741,7 @@ dependencies = [ "async-compression", "base64 0.21.7", "bitflags 2.6.0", - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-util", "http 1.1.0", @@ -11514,7 +11756,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -11565,7 +11807,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -11653,6 +11895,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracy-client" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" +dependencies = [ + "loom", + "once_cell", + "rustc-demangle", + "tracy-client-sys", +] + +[[package]] +name = "tracy-client-sys" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" +dependencies = [ + "cc", +] + [[package]] name = "triehash" version = "0.8.4" @@ -11736,13 +11999,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", - "bytes 1.7.1", + "bytes 1.7.2", "data-encoding", "http 1.1.0", "httparse", "log", "rand 0.8.5", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "sha1", "thiserror", @@ -11796,24 +12059,24 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-truncate" @@ -11828,15 +12091,15 @@ dependencies = [ [[package]] name = 
"unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -11854,6 +12117,12 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.9.0" @@ -11999,7 +12268,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-shared", ] @@ -12033,7 +12302,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -12069,9 +12338,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -12133,6 +12402,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -12148,12 +12427,25 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement", - "windows-interface", + "windows-implement 0.57.0", + "windows-interface 0.57.0", "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -12162,7 +12454,18 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] @@ -12173,7 +12476,18 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] @@ -12365,9 +12679,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -12405,7 +12719,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "send_wrapper 0.6.0", "thiserror", "wasm-bindgen", @@ -12413,15 +12727,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "wyhash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf6e163c25e3fac820b4b453185ea2dea3b6a3e0a721d4d23d75bd33734c295" -dependencies = [ - "rand_core 0.6.4", -] - [[package]] name = "wyz" version = "0.5.1" @@ -12431,6 +12736,12 @@ dependencies = [ "tap", ] +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "yoke" version = "0.7.4" @@ -12451,7 +12762,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "synstructure 0.13.1", ] @@ -12473,7 +12784,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -12493,7 +12804,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "synstructure 0.13.1", ] @@ -12514,7 +12825,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -12536,7 +12847,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d72a1be5d1..e8dc860e13 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] version = "1.0.4" edition = "2021" -rust-version = "1.80" +rust-version = "1.81" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -31,13 +31,13 @@ members = [ "crates/consensus/consensus/", "crates/consensus/debug-client/", "crates/e2e-test-utils/", + "crates/engine/invalid-block-hooks/", "crates/engine/primitives/", "crates/engine/service", "crates/engine/tree/", "crates/engine/util/", "crates/errors/", "crates/ethereum-forks/", - "crates/ethereum-forks/", "crates/ethereum/cli/", "crates/ethereum/consensus/", "crates/ethereum/engine-primitives/", @@ -72,6 +72,8 @@ members = [ "crates/node/core/", "crates/node/events/", "crates/node/metrics", + "crates/node/types", + "crates/optimism/bin", "crates/optimism/chainspec", "crates/optimism/cli", "crates/optimism/consensus", @@ -173,9 +175,11 @@ 
rustdoc.all = "warn" # suggestions which we fixed. The others didn't have any findings, so we can # assume they don't have that many false positives. Let's enable them to # prevent future problems. +borrow_as_ptr = "warn" branches_sharing_code = "warn" clear_with_drain = "warn" cloned_instead_of_copied = "warn" +collection_is_never_read = "warn" derive_partial_eq_without_eq = "warn" doc_markdown = "warn" empty_line_after_doc_comments = "warn" @@ -214,6 +218,7 @@ redundant_clone = "warn" redundant_else = "warn" single_char_pattern = "warn" string_lit_as_bytes = "warn" +string_lit_chars_any = "warn" suboptimal_flops = "warn" suspicious_operation_groupings = "warn" trailing_empty_array = "warn" @@ -223,12 +228,14 @@ trivial_regex = "warn" tuple_array_conversions = "warn" type_repetition_in_bounds = "warn" uninhabited_references = "warn" +unnecessary_self_imports = "warn" unnecessary_struct_initialization = "warn" unnested_or_patterns = "warn" unused_peekable = "warn" unused_rounding = "warn" use_self = "warn" useless_let_if_seq = "warn" +while_float = "warn" zero_sized_map_values = "warn" # These are nursery lints which have findings. Allow them for now. Some are not @@ -236,16 +243,15 @@ zero_sized_map_values = "warn" # Explicitly listing should make it easier to fix in the future. as_ptr_cast_mut = "allow" cognitive_complexity = "allow" -collection_is_never_read = "allow" debug_assert_with_mut_call = "allow" fallible_impl_from = "allow" future_not_send = "allow" needless_collect = "allow" non_send_fields_in_send_ty = "allow" redundant_pub_crate = "allow" -too_long_first_doc_paragraph = "allow" significant_drop_in_scrutinee = "allow" significant_drop_tightening = "allow" +too_long_first_doc_paragraph = "allow" # Speed up tests. [profile.dev.package] @@ -257,22 +263,22 @@ unarray.opt-level = 3 # Meant for testing - all optimizations, but with debug assertions and overflow checks. [profile.hivetests] inherits = "test" -opt-level = 3 lto = "thin" +opt-level = 3 [profile.release] -opt-level = 3 -lto = "thin" +codegen-units = 16 debug = "line-tables-only" -strip = true +lto = "thin" +opt-level = 3 panic = "unwind" -codegen-units = 16 +strip = true # Use the `--profile profiling` flag to show symbols in release mode. # e.g. 
`cargo build --profile profiling` [profile.profiling] -inherits = "release" debug = 2 +inherits = "release" strip = false # Make sure debug symbols are in the bench profile @@ -280,12 +286,13 @@ strip = false inherits = "profiling" [profile.maxperf] +codegen-units = 1 inherits = "release" lto = "fat" -codegen-units = 1 [workspace.dependencies] # reth +op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } @@ -341,6 +348,7 @@ reth-exex = { path = "crates/exex/exex" } reth-exex-test-utils = { path = "crates/exex/test-utils" } reth-exex-types = { path = "crates/exex/types" } reth-fs-util = { path = "crates/fs-util" } +reth-invalid-block-hooks = { path = "crates/engine/invalid-block-hooks" } reth-ipc = { path = "crates/rpc/ipc" } reth-libmdbx = { path = "crates/storage/libmdbx-rs" } reth-mdbx-sys = { path = "crates/storage/libmdbx-rs/mdbx-sys" } @@ -362,6 +370,7 @@ reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } reth-node-optimism = { path = "crates/optimism/node" } +reth-node-types = { path = "crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } @@ -410,18 +419,18 @@ reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-prefetch = { path = "crates/trie/prefetch" } # revm -revm = { version = "14.0.0", features = [ +revm = { version = "14.0.1", features = [ "std", "secp256k1", "blst", ], default-features = false } -revm-inspectors = "0.6" -revm-primitives = { version = "9.0.0", features = [ +revm-inspectors = "0.7" +revm-primitives = { version = "9.0.2", features = [ "std", ], default-features = false } # eth -alloy-chains = "=0.1.18" +alloy-chains = "0.1.32" alloy-dyn-abi = "0.8.0" alloy-json-abi = "0.8.0" alloy-primitives = { version = "0.8.0", default-features = false } @@ -429,42 +438,45 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.5", default-features = false } -alloy-consensus = { version = "0.3.0", default-features = false } -alloy-eips = { version = "0.3.0", default-features = false } -alloy-genesis = { version = "0.3.0", default-features = false } -alloy-json-rpc = { version = "0.3.0", default-features = false } -alloy-network = { version = "0.3.0", default-features = false } -alloy-node-bindings = { version = "0.3.0", default-features = false } -alloy-provider = { version = "0.3.0", features = [ +alloy-consensus = { version = "0.3.6", default-features = false } +alloy-eips = { version = "0.3.6", default-features = false } +alloy-genesis = { version = "0.3.6", default-features = false } +alloy-json-rpc = { version = "0.3.6", default-features = false } +alloy-network = { version = "0.3.6", default-features = false } +alloy-network-primitives = { version = "0.3.6", default-features = false } +alloy-node-bindings = { version = "0.3.6", default-features = false } +alloy-provider = { version = "0.3.6", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.3.0", default-features = false } -alloy-rpc-client = { version = "0.3.0", default-features = false } -alloy-rpc-types = { version = "0.3.0", features = [ +alloy-pubsub = { version = "0.3.6", default-features = false } +alloy-rpc-client = { version = "0.3.6", 
default-features = false } +alloy-rpc-types = { version = "0.3.6", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.3.0", default-features = false } -alloy-rpc-types-anvil = { version = "0.3.0", default-features = false } -alloy-rpc-types-beacon = { version = "0.3.0", default-features = false } -alloy-rpc-types-engine = { version = "0.3.0", default-features = false } -alloy-rpc-types-eth = { version = "0.3.0", default-features = false } -alloy-rpc-types-mev = { version = "0.3.0", default-features = false } -alloy-rpc-types-trace = { version = "0.3.0", default-features = false } -alloy-rpc-types-txpool = { version = "0.3.0", default-features = false } -alloy-serde = { version = "0.3.0", default-features = false } -alloy-signer = { version = "0.3.0", default-features = false } -alloy-signer-local = { version = "0.3.0", default-features = false } -alloy-transport = { version = "0.3.0" } -alloy-transport-http = { version = "0.3.0", features = [ +alloy-rpc-types-admin = { version = "0.3.6", default-features = false } +alloy-rpc-types-anvil = { version = "0.3.6", default-features = false } +alloy-rpc-types-beacon = { version = "0.3.6", default-features = false } +alloy-rpc-types-debug = { version = "0.3.6", default-features = false } +alloy-rpc-types-engine = { version = "0.3.6", default-features = false } +alloy-rpc-types-eth = { version = "0.3.6", default-features = false } +alloy-rpc-types-mev = { version = "0.3.6", default-features = false } +alloy-rpc-types-trace = { version = "0.3.6", default-features = false } +alloy-rpc-types-txpool = { version = "0.3.6", default-features = false } +alloy-serde = { version = "0.3.6", default-features = false } +alloy-signer = { version = "0.3.6", default-features = false } +alloy-signer-local = { version = "0.3.6", default-features = false } +alloy-transport = { version = "0.3.6" } +alloy-transport-http = { version = "0.3.6", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.3.0", default-features = false } -alloy-transport-ws = { version = "0.3.0", default-features = false } +alloy-transport-ipc = { version = "0.3.6", default-features = false } +alloy-transport-ws = { version = "0.3.6", default-features = false } # op -op-alloy-rpc-types = "0.2" -op-alloy-rpc-types-engine = "0.2" -op-alloy-network = "0.2" +op-alloy-rpc-types = "0.2.12" +op-alloy-rpc-types-engine = "0.2.12" +op-alloy-network = "0.2.12" +op-alloy-consensus = "0.2.12" # misc aquamarine = "0.5" @@ -474,6 +486,7 @@ bitflags = "2.4" boyer-moore-magiclen = "0.2.16" bytes = "1.5" clap = "4" +cfg-if = "1.0" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" derive_more = { version = "1", features = ["full"] } @@ -486,6 +499,9 @@ humantime-serde = "1.1" itertools = "0.13" linked_hash_set = "0.1" modular-bitfield = "0.11.2" +notify = { version = "6.1.1", default-features = false, features = [ + "macos_fsevent", +] } nybbles = "0.2.1" once_cell = "1.19" parking_lot = "0.12" @@ -539,6 +555,7 @@ tower-http = "0.5" # p2p discv5 = "0.7.0" +if-addrs = "0.13" # rpc jsonrpsee = "0.24" @@ -578,23 +595,27 @@ serial_test = "3" similar-asserts = "1.5.0" tempfile = "3.8" test-fuzz = "5" +tikv-jemalloc-ctl = "0.6" +tikv-jemallocator = "0.6" +tracy-client = "0.17.3" [patch.crates-io] -revm = { git = "https://github.com/bnb-chain/revm", rev = "b8318b3db1c3499c580429dca5961bfa8693c690" } -revm-interpreter = { git = "https://github.com/bnb-chain/revm", rev = 
"b8318b3db1c3499c580429dca5961bfa8693c690" } -revm-precompile = { git = "https://github.com/bnb-chain/revm", rev = "b8318b3db1c3499c580429dca5961bfa8693c690" } -revm-primitives = { git = "https://github.com/bnb-chain/revm", rev = "b8318b3db1c3499c580429dca5961bfa8693c690" } -alloy-chains = { git = "https://github.com/bnb-chain/alloy-chains-rs.git", tag = "v1.0.0" } -alloy-rpc-types-eth = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-consensus = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-eips = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-network = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-serde = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-signer = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-signer-local = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-provider = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-transport = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-transport-http = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-json-rpc = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-rpc-client = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } -alloy-rpc-types-engine = { git = "https://github.com/bnb-chain/alloy", rev = "133e5b3dd8af4bc1ceb9018be42c156727eef619" } +revm = { git = "https://github.com/bnb-chain/revm", rev = "fbc92f58052227c06f8a2a4e25227d111ed8e08e" } +revm-interpreter = { git = "https://github.com/bnb-chain/revm", rev = "fbc92f58052227c06f8a2a4e25227d111ed8e08e" } +revm-primitives = { git = "https://github.com/bnb-chain/revm", rev = "fbc92f58052227c06f8a2a4e25227d111ed8e08e" } +alloy-chains = { git = "https://github.com/bnb-chain/alloy-chains-rs.git", rev = "6be74c75424a31a0d98a906084c778a9d74769fc" } +alloy-rpc-types-eth = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-consensus = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-eips = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-network = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-network-primitives = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-serde = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-signer = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-signer-local = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-provider = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-transport = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-transport-http = { git = 
"https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-json-rpc = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-rpc-client = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } +alloy-rpc-types-engine = { git = "https://github.com/bnb-chain/alloy", rev = "718aee579dc000019582245226eebf8b40d24c41" } + diff --git a/Makefile b/Makefile index 65a5343132..25d797d4cd 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ install: ## Build and install the reth binary under `~/.cargo/bin`. .PHONY: install-op install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. - cargo install --path bin/reth --bin op-reth --force --locked \ + cargo install --path crates/optimism/bin --bin op-reth --force --locked \ --features "optimism opbnb $(FEATURES)" \ --profile "$(PROFILE)" \ $(CARGO_INSTALL_EXTRA_FLAGS) @@ -74,7 +74,7 @@ build-debug: ## Build the reth binary into `target/debug` directory. .PHONY: build-op build-op: ## Build the op-reth binary into `target` directory. - cargo build --bin op-reth --features "optimism opbnb $(FEATURES)" --profile "$(PROFILE)" + cargo build --bin op-reth --features "optimism,opbnb,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml .PHONY: build-bsc build-bsc: ## Build the bsc-reth binary into `target` directory. @@ -85,7 +85,7 @@ build-native-%: cargo build --bin reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)" op-build-native-%: - cargo build --bin op-reth --target $* --features "optimism opbnb $(FEATURES)" --profile "$(PROFILE)" + cargo build --bin op-reth --target $* --features "optimism,opbnb,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml bsc-build-native-%: cargo build --bin bsc-reth --target $* --features "bsc $(FEATURES)" --profile "$(PROFILE)" @@ -124,7 +124,7 @@ build-%: op-build-%: RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \ - cross build --bin op-reth --target $* --features "optimism opbnb $(FEATURES)" --profile "$(PROFILE)" + cross build --bin op-reth --target $* --features "optimism,opbnb,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml bsc-build-%: RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \ @@ -385,7 +385,7 @@ maxperf: ## Builds `reth` with the most aggressive optimisations. .PHONY: maxperf-op maxperf-op: ## Builds `op-reth` with the most aggressive optimisations. - RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml .PHONY: maxperf-no-asm maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature. 
diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 29e9c4963c..e4a532a2cc 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -14,9 +14,8 @@ workspace = true [dependencies] # reth -reth-provider = { workspace = true } reth-cli-runner.workspace = true -reth-db = { workspace = true, features = ["mdbx"] } +reth-cli-util.workspace = true reth-node-core.workspace = true reth-node-api.workspace = true reth-rpc-types.workspace = true @@ -25,7 +24,10 @@ reth-primitives = { workspace = true, features = ["alloy-compat"] } reth-tracing.workspace = true # alloy -alloy-provider = { workspace = true, features = ["engine-api", "reqwest-rustls-tls"], default-features = false } +alloy-provider = { workspace = true, features = [ + "engine-api", + "reqwest-rustls-tls", +], default-features = false } alloy-rpc-types-engine.workspace = true alloy-transport.workspace = true alloy-transport-http.workspace = true @@ -34,7 +36,6 @@ alloy-transport-ipc.workspace = true alloy-pubsub.workspace = true alloy-json-rpc.workspace = true alloy-rpc-client.workspace = true -alloy-consensus.workspace = true alloy-eips.workspace = true # reqwest @@ -50,7 +51,6 @@ tracing.workspace = true # io serde.workspace = true -serde_json.workspace = true # async tokio = { workspace = true, features = [ @@ -59,7 +59,6 @@ tokio = { workspace = true, features = [ "time", "rt-multi-thread", ] } -tokio-util.workspace = true futures.workspace = true async-trait.workspace = true @@ -71,10 +70,6 @@ clap = { workspace = true, features = ["derive", "env"] } # for writing data csv = "1.3.0" -[target.'cfg(unix)'.dependencies] -tikv-jemallocator = { version = "0.5.0", optional = true } -libc = "0.2" - [dev-dependencies] reth-tracing.workspace = true @@ -83,8 +78,9 @@ default = ["jemalloc"] asm-keccak = ["reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator"] -jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] +jemalloc = ["reth-cli-util/jemalloc"] +jemalloc-prof = ["reth-cli-util/jemalloc-prof"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] @@ -92,11 +88,7 @@ min-info-logs = ["tracing/release_max_level_info"] min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-node-core/optimism", -] +optimism = ["reth-primitives/optimism", "reth-node-core/optimism"] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices ethereum = [] diff --git a/bin/reth-bench/src/main.rs b/bin/reth-bench/src/main.rs index 8cb7dbd07b..c7335961d8 100644 --- a/bin/reth-bench/src/main.rs +++ b/bin/reth-bench/src/main.rs @@ -3,10 +3,16 @@ //! This is a tool that converts existing blocks into a stream of blocks for benchmarking purposes. //! These blocks are then fed into reth as a stream of execution payloads. -// We use jemalloc for performance reasons. 
-#[cfg(all(feature = "jemalloc", unix))] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + #[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); pub mod authenticated_transport; pub mod bench; diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index b8b4ca3187..ab98fa7675 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth +reth-cli.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-primitives.workspace = true @@ -52,7 +53,6 @@ reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-static-file.workspace = true -reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true @@ -62,21 +62,19 @@ reth-node-optimism = { workspace = true, optional = true, features = [ ] } reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true -reth-db-common.workspace = true reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-consensus.workspace = true -reth-optimism-primitives.workspace = true reth-engine-util.workspace = true reth-prune.workspace = true -reth-stages-api.workspace = true -reth-optimism-cli = { workspace = true, optional = true } -reth-optimism-rpc.workspace = true reth-node-bsc = { workspace = true, optional = true, features = [ "bsc", ] } +reth-evm-bsc = { workspace = true, optional = true, features = [ + "bsc", +] } # crypto alloy-rlp.workspace = true @@ -85,13 +83,7 @@ alloy-rlp.workspace = true tracing.workspace = true # io -fdlimit.workspace = true -serde.workspace = true serde_json.workspace = true -toml = { workspace = true, features = ["display"] } - -# metrics -metrics-process.workspace = true # async tokio = { workspace = true, features = [ @@ -106,20 +98,12 @@ futures.workspace = true aquamarine.workspace = true eyre.workspace = true clap = { workspace = true, features = ["derive", "env"] } -tempfile.workspace = true backon.workspace = true similar-asserts.workspace = true -itertools.workspace = true - -# p2p -discv5.workspace = true - -[target.'cfg(unix)'.dependencies] -tikv-jemallocator = { version = "0.5.0", optional = true } -libc = "0.2" [dev-dependencies] reth-discv4.workspace = true +tempfile.workspace = true [features] default = ["jemalloc"] @@ -128,8 +112,13 @@ dev = ["reth-cli-commands/dev"] asm-keccak = ["reth-node-core/asm-keccak", "reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc", "reth-node-metrics/jemalloc"] -jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc", + "reth-node-metrics/jemalloc", +] +jemalloc-prof = ["reth-cli-util/jemalloc"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = 
["tracing/release_max_level_warn"] @@ -137,23 +126,6 @@ min-info-logs = ["tracing/release_max_level_info"] min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] -optimism = [ - "dep:reth-node-optimism", - "dep:reth-optimism-cli", - "reth-beacon-consensus/optimism", - "reth-blockchain-tree/optimism", - "reth-node-core/optimism", - "reth-optimism-cli?/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-rpc/optimism", -] - -opbnb = [ - "reth-primitives/opbnb", - "reth-node-core/opbnb", -] - bsc = [ "reth-rpc/bsc", "reth-primitives/bsc", @@ -161,7 +133,8 @@ bsc = [ "reth-stages/bsc", "reth-node-builder/bsc", "reth-beacon-consensus/bsc", - "dep:reth-node-bsc", + "reth-node-bsc/bsc", + "reth-evm-bsc/bsc", ] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices @@ -171,12 +144,7 @@ ethereum = [] name = "reth" path = "src/main.rs" -[[bin]] -name = "op-reth" -path = "src/optimism.rs" -required-features = ["optimism"] - [[bin]] name = "bsc-reth" path = "src/bsc.rs" -required-features = ["bsc"] \ No newline at end of file +required-features = ["bsc"] diff --git a/bin/reth/src/bsc.rs b/bin/reth/src/bsc.rs index 7ee88566e3..d5976998be 100644 --- a/bin/reth/src/bsc.rs +++ b/bin/reth/src/bsc.rs @@ -1,33 +1,49 @@ #![allow(missing_docs)] -// We use jemalloc for performance reasons. -#[cfg(all(feature = "jemalloc", unix))] #[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); -#[cfg(not(feature = "bsc"))] -compile_error!("Cannot build the `bsc-reth` binary with the `bsc` feature flag disabled."); - -/// clap [Args] for Engine related arguments. -use clap::Args; +use clap::{Args, Parser}; +use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; +#[cfg(feature = "bsc")] +use reth_node_bsc::{node::BSCAddOns, BscNode}; +use reth_node_builder::{ + engine_tree_config::{ + TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, + }, + EngineNodeLauncher, +}; +use reth_provider::providers::BlockchainProvider2; /// Parameters for configuring the engine -#[derive(Debug, Clone, Args, PartialEq, Eq, Default)] +#[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "Engine")] pub struct EngineArgs { /// Enable the engine2 experimental features on reth binary #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + + /// Configure persistence threshold for engine experimental. + #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + pub persistence_threshold: u64, + + /// Configure the target number of blocks to keep in memory. 
+ #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + pub memory_block_buffer_target: u64, +} + +impl Default for EngineArgs { + fn default() -> Self { + Self { + experimental: false, + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + } + } } #[cfg(feature = "bsc")] fn main() { - use clap::Parser; - use reth::cli::Cli; - use reth_node_bsc::{node::BSCAddOns, BscNode}; - use reth_node_builder::EngineNodeLauncher; - use reth_provider::providers::BlockchainProvider2; - reth_cli_util::sigsegv_handler::install(); // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. @@ -35,30 +51,36 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = Cli::::parse().run(|builder, engine_args| async move { - let enable_engine2 = engine_args.experimental; - match enable_engine2 { - true => { - let handle = builder - .with_types_and_provider::>() - .with_components(BscNode::components()) - .with_add_ons::() - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - ); - builder.launch_with(launcher) - }) - .await?; - handle.node_exit_future.await + if let Err(err) = + Cli::::parse().run(|builder, engine_args| async move { + let enable_engine2 = engine_args.experimental; + match enable_engine2 { + true => { + let engine_tree_config = TreeConfig::default() + .with_persistence_threshold(engine_args.persistence_threshold) + .with_memory_block_buffer_target(engine_args.memory_block_buffer_target); + let handle = builder + .with_types_and_provider::>() + .with_components(BscNode::components()) + .with_add_ons::() + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + engine_tree_config, + ); + builder.launch_with(launcher) + }) + .await?; + handle.node_exit_future.await + } + false => { + let handle = builder.launch_node(BscNode::default()).await?; + handle.node_exit_future.await + } } - false => { - let handle = builder.launch_node(BscNode::default()).await?; - handle.node_exit_future.await - } - } - }) { + }) + { eprintln!("Error: {err:?}"); std::process::exit(1); } diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d6fc7d3c4d..665dbcba2b 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -1,16 +1,13 @@ //! CLI definition and entrypoint to executable use crate::{ - args::{ - utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, - LogArgs, - }, + args::LogArgs, commands::debug_cmd, - macros::block_executor, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ config_cmd, db, dump_genesis, import, init_cmd, init_state, node::{self, NoArgs}, @@ -19,6 +16,8 @@ use reth_cli_commands::{ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; +use reth_node_core::args::utils::DefaultChainSpecParser; +use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; use tracing::info; @@ -35,10 +34,10 @@ pub use crate::core::cli::*; /// This is the entrypoint to the executable. 
#[derive(Debug, Parser)] #[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] -pub struct Cli<Ext: clap::Args + fmt::Debug = NoArgs> { +pub struct Cli<C: ChainSpecParser = DefaultChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> { /// The command to run #[command(subcommand)] - command: Commands<Ext>, + command: Commands<C, Ext>, /// The chain this node is running. /// @@ -46,12 +45,12 @@ pub struct Cli<Ext: clap::Args + fmt::Debug = NoArgs> { #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = chain_value_parser, + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser(), global = true, )] - chain: Arc<ChainSpec>, + chain: Arc<C::ChainSpec>, /// Add a new instance of a node. /// @@ -89,7 +88,7 @@ impl Cli { } } -impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { +impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cli<C, Ext> { /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the @@ -117,14 +116,14 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { /// /// ```no_run /// use clap::Parser; - /// use reth::cli::Cli; + /// use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; /// /// #[derive(Debug, Parser)] /// pub struct MyArgs { /// pub enable: bool, /// } /// - /// Cli::parse() + /// Cli::<DefaultChainSpecParser, MyArgs>::parse() /// .run(|builder, my_args: MyArgs| async move { /// // launch the node /// @@ -149,29 +148,33 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { Commands::Node(command) => { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } - Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Init(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) + } + Commands::InitState(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) + } Commands::Import(command) => runner.run_blocking_until_ctrl_c( - command.execute(|chain_spec| block_executor!(chain_spec)), + command.execute::<EthereumNode, _, _>(EthExecutorProvider::ethereum), ), - #[cfg(feature = "optimism")] - Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), - #[cfg(feature = "optimism")] - Commands::ImportReceiptsOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute()) - } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) + } Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute(ctx, |chain_spec| block_executor!(chain_spec)) + command.execute::<EthereumNode, _, _>(ctx, EthExecutorProvider::ethereum) }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), - Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Debug(command) => { + runner.run_command_until_exit(|ctx| command.execute::<EthereumNode>(ctx)) + } + Commands::Recover(command) => { + runner.run_command_until_exit(|ctx| command.execute::<EthereumNode>(ctx)) + } + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<EthereumNode>()), } } @@ -187,38 +190,30 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { /// Commands to be executed #[derive(Debug, Subcommand)] -pub enum Commands<Ext: clap::Args + fmt::Debug> { +pub enum Commands<C: ChainSpecParser, Ext: clap::Args + fmt::Debug> { /// Start the node #[command(name = "node")]
- Node(node::NodeCommand<Ext>), + Node(Box<node::NodeCommand<C, Ext>>), /// Initialize the database from a genesis file. #[command(name = "init")] - Init(init_cmd::InitCommand), + Init(init_cmd::InitCommand<C>), /// Initialize the database from a state dump file. #[command(name = "init-state")] - InitState(init_state::InitStateCommand), + InitState(init_state::InitStateCommand<C>), /// This syncs RLP encoded blocks from a file. #[command(name = "import")] - Import(import::ImportCommand), - /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. - #[cfg(feature = "optimism")] - #[command(name = "import-op")] - ImportOp(reth_optimism_cli::ImportOpCommand), - /// This imports RLP encoded receipts from a file. - #[cfg(feature = "optimism")] - #[command(name = "import-receipts-op")] - ImportReceiptsOp(reth_optimism_cli::ImportReceiptsOpCommand), + Import(import::ImportCommand<C>), /// Dumps genesis block JSON configuration to stdout. - DumpGenesis(dump_genesis::DumpGenesisCommand), + DumpGenesis(dump_genesis::DumpGenesisCommand<C>), /// Database debugging utilities #[command(name = "db")] - Db(db::Command), + Db(db::Command<C>), /// Manipulate individual stages. #[command(name = "stage")] - Stage(stage::Command), + Stage(stage::Command<C>), /// P2P Debugging utilities #[command(name = "p2p")] - P2P(p2p::Command), + P2P(p2p::Command<C>), /// Generate Test Vectors #[cfg(feature = "dev")] #[command(name = "test-vectors")] @@ -228,13 +223,13 @@ pub enum Commands { Config(config_cmd::Command), /// Various debug routines #[command(name = "debug")] - Debug(debug_cmd::Command), + Debug(debug_cmd::Command<C>), /// Scripts for node recovery #[command(name = "recover")] - Recover(recover::Command), + Recover(recover::Command<C>), /// Prune according to the configuration without any limits #[command(name = "prune")] - Prune(prune::PruneCommand), + Prune(prune::PruneCommand<C>), } #[cfg(test)] @@ -242,6 +237,7 @@ mod tests { use super::*; use crate::args::ColorMode; use clap::CommandFactory; + use reth_node_core::args::utils::SUPPORTED_CHAINS; #[test] fn parse_color_mode() { @@ -254,7 +250,7 @@ mod tests { /// runtime #[test] fn test_parse_help_all_subcommands() { - let reth = Cli::<NoArgs>::command(); + let reth = Cli::<DefaultChainSpecParser, NoArgs>::command(); for sub_command in reth.get_subcommands() { let err = Cli::try_parse_args_from(["reth", sub_command.get_name(), "--help"]) .err()
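With `Cli` now generic over a chain spec parser, a downstream binary can plug in its own chain resolution. A minimal sketch, assuming the `ChainSpecParser` trait surface implied by the hunk above (an associated `ChainSpec`, a `SUPPORTED_CHAINS` constant, a static `parse`, and defaulted `parser()`/`help_message()`); the `DevnetChainSpecParser` name and its body are hypothetical, not part of this diff:

```rust
use std::sync::Arc;

use reth_chainspec::{ChainSpec, DEV};
use reth_cli::chainspec::ChainSpecParser;

/// Hypothetical parser that only recognizes a bundled dev chain.
#[derive(Debug, Clone)]
struct DevnetChainSpecParser;

impl ChainSpecParser for DevnetChainSpecParser {
    type ChainSpec = ChainSpec;

    const SUPPORTED_CHAINS: &'static [&'static str] = &["devnet"];

    fn parse(s: &str) -> eyre::Result<Arc<ChainSpec>> {
        match s {
            // Reuse the bundled dev spec; a real parser would also accept a
            // path to a genesis JSON file here.
            "devnet" => Ok(DEV.clone()),
            other => eyre::bail!("unsupported chain: {other}"),
        }
    }
}

// Usage sketch: Cli::<DevnetChainSpecParser, NoArgs>::parse().run(...)
```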
diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs index a2f4ee7582..2afe795a45 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,5 +1,4 @@ //! Command for debugging block building. -use crate::macros::block_executor; use alloy_rlp::Decodable; use clap::Parser; use eyre::Context; @@ -10,26 +9,30 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::Consensus; -use reth_db::DatabaseEnv; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; +#[cfg(feature = "bsc")] +use reth_evm_bsc::{BscEvmConfig, BscExecutorProvider}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::PayloadBuilderAttributes; +use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes}; +#[cfg(not(feature = "bsc"))] +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::database::CachedReads; use reth_primitives::{ - constants::eip4844::LoadKzgSettingsError, revm_primitives::KzgSettings, Address, - BlobTransaction, BlobTransactionSidecar, Bytes, PooledTransactionsElement, SealedBlock, - SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + revm_primitives::KzgSettings, Address, BlobTransaction, BlobTransactionSidecar, Bytes, + PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, + TxEip4844, B256, U256, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_prune::PruneModes; use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use reth_stages::StageId; @@ -46,9 +49,9 @@ use tracing::*; /// This debug routine requires that the node is positioned at the block before the target. /// The script will then parse the block and attempt to build a similar one. #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs<C>, /// Overrides the KZG trusted setup by reading from the supplied file. #[arg(long, value_name = "PATH")] @@ -77,13 +80,13 @@ pub struct Command { blobs_bundle_path: Option<PathBuf>, } -impl Command { +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { /// Fetches the best block from the database. /// /// If the database is empty, returns the genesis block. - fn lookup_best_block( + fn lookup_best_block<N: NodeTypesWithDB<ChainSpec = C::ChainSpec>>( &self, - factory: ProviderFactory<Arc<DatabaseEnv>>, + factory: ProviderFactory<N>, ) -> RethResult<Arc<SealedBlock>> { let provider = factory.provider()?; @@ -106,7 +109,9 @@ impl Command { fn kzg_settings(&self) -> eyre::Result<EnvKzgSettings> { if let Some(ref trusted_setup_file) = self.trusted_setup_file { let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file) - .map_err(LoadKzgSettingsError::KzgError)?; + .wrap_err_with(|| { + format!("Failed to load trusted setup file: {:?}", trusted_setup_file) + })?; Ok(EnvKzgSettings::Custom(Arc::new(trusted_setup))) } else { Ok(EnvKzgSettings::Default) @@ -114,25 +119,25 @@ impl Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
let consensus: Arc<dyn Consensus> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - #[cfg(feature = "bsc")] - let executor = block_executor!(provider_factory.chain_spec(), provider_factory.clone()); #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); + #[cfg(feature = "bsc")] + let executor = + BscExecutorProvider::bsc(provider_factory.chain_spec(), provider_factory.clone()); // configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new( - tree_externals, - BlockchainTreeConfig::default(), - PruneModes::none(), - )?; + let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database @@ -224,17 +229,6 @@ impl Command { let payload_config = PayloadConfig::new( Arc::clone(&best_block), Bytes::default(), - #[cfg(feature = "optimism")] - reth_node_optimism::OptimismPayloadBuilderAttributes::try_new( - best_block.hash(), - reth_rpc_types::optimism::OptimismPayloadAttributes { - payload_attributes: payload_attrs, - transactions: None, - no_tx_pool: None, - gas_limit: None, - }, - )?, - #[cfg(not(feature = "optimism"))] reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, @@ -251,14 +245,14 @@ impl Command { None, ); - #[cfg(feature = "optimism")] - let payload_builder = reth_node_optimism::OptimismPayloadBuilder::new( - reth_node_optimism::OptimismEvmConfig::default(), - ) - .compute_pending_block(); - - #[cfg(not(feature = "optimism"))] - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default(); + #[cfg(not(feature = "bsc"))] + let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + EthEvmConfig::new(provider_factory.chain_spec()), + ); + #[cfg(feature = "bsc")] + let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + BscEvmConfig::new(provider_factory.chain_spec()), + ); match payload_builder.try_build(args)? { BuildOutcome::Better { payload, .. } => { @@ -274,12 +268,15 @@ impl Command { SealedBlockWithSenders::new(block.clone(), senders).unwrap(); let db = StateProviderDatabase::new(blockchain_db.latest()?); - #[cfg(feature = "bsc")] - let executor = - block_executor!(provider_factory.chain_spec(), provider_factory.clone()) - .executor(db, None); #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()).executor(db, None); + let executor = + EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db, None); + #[cfg(feature = "bsc")] + let executor = BscExecutorProvider::bsc( + provider_factory.chain_spec(), + provider_factory.clone(), + ) + .executor(db, None); let block_execution_output = executor .execute((&block_with_senders.clone().unseal(), U256::MAX, None).into())?;
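With the `block_executor!` macro gone (its `macros.rs` is deleted later in this diff), every call site names its executor provider explicitly under a feature gate. A condensed sketch of what the non-BSC arm expands to; the `eth_executor` helper name is hypothetical, while `EthExecutorProvider::ethereum` is the same constructor used in the hunks above:

```rust
use std::sync::Arc;

use reth_chainspec::ChainSpec;
use reth_node_ethereum::EthExecutorProvider;

/// What the old `block_executor!($chain_spec)` expanded to on Ethereum builds.
#[cfg(not(feature = "bsc"))]
fn eth_executor(chain_spec: Arc<ChainSpec>) -> EthExecutorProvider {
    EthExecutorProvider::ethereum(chain_spec)
}
```

The BSC arm additionally threads a provider handle (`BscExecutorProvider::bsc(chain_spec, provider)`), which is why the one-argument macro abstraction could not survive the refactor.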
diff --git a/bin/reth/src/commands/debug_cmd/execution.rs index 07e96ee3ba..0dbd55fc2e 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,17 +1,17 @@ //! Command for debugging execution. -use std::{path::PathBuf, sync::Arc}; - +use crate::{args::NetworkArgs, utils::get_single_header}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; -use reth_db_api::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -20,6 +20,12 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkEventListenerProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; +use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; + +#[cfg(feature = "bsc")] +use reth_evm_bsc::BscExecutorProvider; +#[cfg(not(feature = "bsc"))] +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, @@ -31,16 +37,15 @@ use reth_stages::{ }; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; +use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::*; -use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; - /// `reth debug execution` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs<C>, #[command(flatten)] network: NetworkArgs, @@ -55,18 +60,17 @@ pub struct Command { pub interval: u64, } -impl Command { - fn build_pipeline( +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { + fn build_pipeline<N: NodeTypesWithDB<ChainSpec = C::ChainSpec>, Client>( &self, config: &Config, client: Client, consensus: Arc<dyn Consensus>, - provider_factory: ProviderFactory<DB>, + provider_factory: ProviderFactory<N>, task_executor: &TaskExecutor, - static_file_producer: StaticFileProducer<DB>, - ) -> eyre::Result<Pipeline<DB>> + static_file_producer: StaticFileProducer<ProviderFactory<N>>, + ) -> eyre::Result<Pipeline<N>> where - DB: Database + Unpin + Clone + 'static, Client: BlockClient + 'static, { // building network downloaders using the fetch client @@ -82,12 +86,13 @@ impl Command { let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - #[cfg(feature = "bsc")] - let executor = block_executor!(provider_factory.chain_spec(), provider_factory.clone()); #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); + #[cfg(feature = "bsc")] + let executor = + BscExecutorProvider::bsc(provider_factory.chain_spec(), provider_factory.clone()); - let pipeline = Pipeline::builder() + let pipeline = Pipeline::<N>::builder() .with_tip_sender(tip_tx) .add_stages( DefaultStages::new( @@ -119,11 +124,11 @@ impl Command { Ok(pipeline) } - async fn build_network( + async fn build_network<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( &self, config: &Config, task_executor: TaskExecutor, - provider_factory: ProviderFactory<Arc<DatabaseEnv>>, + provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>, network_secret_path: PathBuf, default_peers_path: PathBuf, ) -> eyre::Result<NetworkHandle> { @@ -160,8 +165,12 @@ impl Command { } /// Execute `execution-debug` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = self.env.init(AccessRights::RW)?;
+ pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, config, data_dir } = + self.env.init::<N>(AccessRights::RW)?; let consensus: Arc<dyn Consensus> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
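All the debug commands in this diff now share one shape: the struct is generic over the parser `C`, and `execute` is generic over node types `N`, with the environment opened via `init::<N>`. A minimal sketch following that pattern; the `HeadCommand` name is hypothetical, and `AccessRights::RO` is assumed to exist alongside the `RW` used above:

```rust
use clap::Parser;
use reth_chainspec::ChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_node_api::NodeTypesWithEngine;
use reth_provider::BlockNumReader;

#[derive(Debug, Parser)]
pub struct HeadCommand<C: ChainSpecParser> {
    #[command(flatten)]
    env: EnvironmentArgs<C>,
}

impl<C: ChainSpecParser<ChainSpec = ChainSpec>> HeadCommand<C> {
    /// Print the current head block number (read-only access is enough here).
    pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>(
        self,
    ) -> eyre::Result<()> {
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
        let best = provider_factory.provider()?.best_block_number()?;
        println!("head: {best}");
        Ok(())
    }
}
```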
diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 9369d71456..bd0c6a99c8 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -1,19 +1,27 @@ //! Command for debugging in-memory merkle trie calculation. -use std::{path::PathBuf, sync::Arc}; - +use crate::{ + args::NetworkArgs, + utils::{get_single_body, get_single_header}, +}; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_db::DatabaseEnv; use reth_errors::BlockValidationError; use reth_evm::execute::{BlockExecutorProvider, Executor}; +#[cfg(feature = "bsc")] +use reth_evm_bsc::BscExecutorProvider; use reth_execution_types::ExecutionOutcome; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; +use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +#[cfg(not(feature = "bsc"))] +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, @@ -25,22 +33,17 @@ use reth_stages::StageId; use reth_tasks::TaskExecutor; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; +use std::{path::PathBuf, sync::Arc}; use tracing::*; -use crate::{ - args::NetworkArgs, - macros::block_executor, - utils::{get_single_body, get_single_header}, -}; - /// `reth debug in-memory-merkle` command /// This debug routine requires that the node is positioned at the block before the target. /// The script will then download the block from p2p network and attempt to calculate and verify /// merkle root for it. #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs<C>, #[command(flatten)] network: NetworkArgs, @@ -54,12 +57,12 @@ pub struct Command { skip_node_depth: Option<usize>, } -impl Command { - async fn build_network( +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { + async fn build_network<N: NodeTypesWithDB<ChainSpec = C::ChainSpec>>( &self, config: &Config, task_executor: TaskExecutor, - provider_factory: ProviderFactory<Arc<DatabaseEnv>>, + provider_factory: ProviderFactory<N>, network_secret_path: PathBuf, default_peers_path: PathBuf, ) -> eyre::Result<NetworkHandle> { @@ -77,8 +80,12 @@ impl Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = self.env.init(AccessRights::RW)?; + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, config, data_dir } = + self.env.init::<N>(AccessRights::RW)?; let provider = provider_factory.provider()?; @@ -132,11 +139,13 @@ impl Command { provider_factory.static_file_provider(), )); - #[cfg(feature = "bsc")] - let executor = block_executor!(provider_factory.chain_spec(), provider_factory.clone()) - .executor(db, None); #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()).executor(db, None); + let executor = + EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db, None); + #[cfg(feature = "bsc")] + let executor = + BscExecutorProvider::bsc(provider_factory.chain_spec(), provider_factory.clone()) + .executor(db, None); let merkle_block_td = provider.header_td_by_number(merkle_block_number)?.unwrap_or_default();
diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs index 71b822fb35..d4d07b92d3 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,21 +1,26 @@ //! Command for debugging merkle trie calculation. - -use std::{path::PathBuf, sync::Arc}; - +use crate::{args::NetworkArgs, utils::get_single_header}; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; -use reth_db::{tables, DatabaseEnv}; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; +#[cfg(feature = "bsc")] +use reth_evm_bsc::BscExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; +use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +#[cfg(not(feature = "bsc"))] +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, @@ -27,15 +32,14 @@ use reth_stages::{ ExecInput, Stage, StageCheckpoint, }; use reth_tasks::TaskExecutor; +use std::{path::PathBuf, sync::Arc}; use tracing::*; -use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; - /// `reth debug merkle` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs<C>, #[command(flatten)] network: NetworkArgs, @@ -53,12 +57,12 @@ pub struct Command { skip_node_depth: Option<usize>, } -impl Command { - async fn build_network( +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { + async fn build_network<N: NodeTypesWithDB<ChainSpec = C::ChainSpec>>( &self, config: &Config, task_executor: TaskExecutor, - provider_factory: ProviderFactory<Arc<DatabaseEnv>>, + provider_factory: ProviderFactory<N>, network_secret_path: PathBuf, default_peers_path: PathBuf, ) -> eyre::Result<NetworkHandle> { @@ -76,8 +80,12 @@ impl Command { } /// Execute `merkle-debug` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = self.env.init(AccessRights::RW)?; + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, config, data_dir } = + self.env.init::<N>(AccessRights::RW)?; let provider_rw = provider_factory.provider_rw()?; @@ -94,11 +102,11 @@ impl Command { ) .await?; + #[cfg(not(feature = "bsc"))] + let executor_provider = EthExecutorProvider::ethereum(provider_factory.chain_spec()); #[cfg(feature = "bsc")] let executor_provider = - block_executor!(provider_factory.chain_spec(), provider_factory.clone()); - #[cfg(not(feature = "bsc"))] - let executor_provider = block_executor!(provider_factory.chain_spec()); + BscExecutorProvider::bsc(provider_factory.chain_spec(), provider_factory.clone()); // Initialize the fetch client info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range");
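A recurring change in these files is `ProviderFactory<Arc<DatabaseEnv>>` becoming `ProviderFactory<N>`: code is now generic over the node's type wiring instead of a concrete mdbx environment. A sketch of the payoff, with bounds assumed to follow the pattern in the hunks above (the `chain_id` helper is hypothetical):

```rust
use reth_chainspec::ChainSpec;
use reth_node_api::NodeTypesWithDB;
use reth_provider::{ChainSpecProvider, ProviderFactory};

/// Works with any database wiring (mdbx-backed or test/mock node types),
/// where the old `ProviderFactory<Arc<DatabaseEnv>>` signature demanded mdbx.
fn chain_id<N: NodeTypesWithDB<ChainSpec = ChainSpec>>(factory: &ProviderFactory<N>) -> u64 {
    factory.chain_spec().chain.id()
}
```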
diff --git a/bin/reth/src/commands/debug_cmd/mod.rs index c3704aff4e..51681e8c59 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -1,7 +1,11 @@ //! `reth debug` command. Collection of various debugging routines. use clap::{Parser, Subcommand}; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; +use reth_node_api::NodeTypesWithEngine; +use reth_node_ethereum::EthEngineTypes; mod build_block; mod execution; @@ -11,35 +15,40 @@ mod replay_engine; /// `reth debug` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(subcommand)] - command: Subcommands, + command: Subcommands<C>, } /// `reth debug` subcommands #[derive(Subcommand, Debug)] -pub enum Subcommands { +pub enum Subcommands<C: ChainSpecParser> { /// Debug the roundtrip execution of blocks as well as the generated data. - Execution(execution::Command), + Execution(execution::Command<C>), /// Debug the clean & incremental state root calculations. - Merkle(merkle::Command), + Merkle(merkle::Command<C>), /// Debug in-memory state root calculation. - InMemoryMerkle(in_memory_merkle::Command), + InMemoryMerkle(in_memory_merkle::Command<C>), /// Debug block building. - BuildBlock(build_block::Command), + BuildBlock(build_block::Command<C>), /// Debug engine API by replaying stored messages. - ReplayEngine(replay_engine::Command), + ReplayEngine(replay_engine::Command<C>), } -impl Command { +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { /// Execute `debug` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute< + N: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = C::ChainSpec>, + >( + self, + ctx: CliContext, + ) -> eyre::Result<()> { match self.command { - Subcommands::Execution(command) => command.execute(ctx).await, - Subcommands::Merkle(command) => command.execute(ctx).await, - Subcommands::InMemoryMerkle(command) => command.execute(ctx).await, - Subcommands::BuildBlock(command) => command.execute(ctx).await, - Subcommands::ReplayEngine(command) => command.execute(ctx).await, + Subcommands::Execution(command) => command.execute::<N>(ctx).await, + Subcommands::Merkle(command) => command.execute::<N>(ctx).await, + Subcommands::InMemoryMerkle(command) => command.execute::<N>(ctx).await, + Subcommands::BuildBlock(command) => command.execute::<N>(ctx).await, + Subcommands::ReplayEngine(command) => command.execute::<N>(ctx).await, } } } diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs index 196ebf4f5a..2920df8cfd 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -1,5 +1,4 @@ -use std::{path::PathBuf, sync::Arc, time::Duration}; - +use crate::args::NetworkArgs; use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; @@ -7,6 +6,8 @@ use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeacon use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; @@ -14,9 +15,15 @@ use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; +#[cfg(feature = "bsc")] +use reth_evm_bsc::{BscEvmConfig, BscExecutorProvider}; use reth_fs_util as fs; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; +use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_ethereum::EthEngineTypes; +#[cfg(not(feature = "bsc"))] +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider};
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, @@ -26,18 +33,17 @@ use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; +use std::{path::PathBuf, sync::Arc, time::Duration}; use tokio::sync::oneshot; use tracing::*; -use crate::{args::NetworkArgs, macros::block_executor}; - /// `reth debug replay-engine` command /// This script will read stored engine API messages and replay them by the timestamp. /// It does not require #[derive(Debug, Parser)] -pub struct Command { +pub struct Command<C: ChainSpecParser> { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs<C>, #[command(flatten)] network: NetworkArgs, @@ -51,12 +57,12 @@ pub struct Command { interval: u64, } -impl Command { - async fn build_network( +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> { + async fn build_network<N: NodeTypesWithDB<ChainSpec = C::ChainSpec>>( &self, config: &Config, task_executor: TaskExecutor, - provider_factory: ProviderFactory<Arc<DatabaseEnv>>, + provider_factory: ProviderFactory<N>, network_secret_path: PathBuf, default_peers_path: PathBuf, ) -> eyre::Result<NetworkHandle> { @@ -74,25 +80,28 @@ impl Command { } /// Execute `debug replay-engine` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = self.env.init(AccessRights::RW)?; + pub async fn execute< + N: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = C::ChainSpec>, + >( + self, + ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, config, data_dir } = + self.env.init::<N>(AccessRights::RW)?; let consensus: Arc<dyn Consensus> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - #[cfg(feature = "bsc")] - let executor = block_executor!(provider_factory.chain_spec(), provider_factory.clone()); #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); + #[cfg(feature = "bsc")] + let executor = + BscExecutorProvider::bsc(provider_factory.chain_spec(), provider_factory.clone()); // Configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new( - tree_externals, - BlockchainTreeConfig::default(), - PruneModes::none(), - )?; + let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain provider @@ -112,13 +121,13 @@ impl Command { .await?; // Set up payload builder - #[cfg(not(feature = "optimism"))] - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default(); - - // Optimism's payload builder is implemented on the OptimismPayloadBuilder type.
- #[cfg(feature = "optimism")] - let payload_builder = reth_node_optimism::OptimismPayloadBuilder::new( - reth_node_optimism::OptimismEvmConfig::default(), + #[cfg(not(feature = "bsc"))] + let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + EthEvmConfig::new(provider_factory.chain_spec()), + ); + #[cfg(feature = "bsc")] + let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( + BscEvmConfig::new(provider_factory.chain_spec()), ); let payload_generator = BasicPayloadJobGenerator::with_builder( @@ -130,17 +139,8 @@ impl Command { payload_builder, ); - #[cfg(feature = "optimism")] - let (payload_service, payload_builder): ( - _, - PayloadBuilderHandle<reth_node_optimism::OptimismEngineTypes>, - ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); - - #[cfg(not(feature = "optimism"))] - let (payload_service, payload_builder): ( - _, - PayloadBuilderHandle<EthEngineTypes>, - ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); + let (payload_service, payload_builder): (_, PayloadBuilderHandle<EthEngineTypes>) = + PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); ctx.task_executor.spawn_critical("payload builder service", payload_service); @@ -148,7 +148,7 @@ impl Command { let network_client = network.fetch_client().await?; let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( network_client, - Pipeline::builder().build( + Pipeline::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::builder().build( provider_factory.clone(), StaticFileProducer::new(provider_factory.clone(), PruneModes::none()), ), diff --git a/bin/reth/src/lib.rs index 020f645808..1fd03b34f2 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -18,20 +18,17 @@ //! calls to the logging component is made. //! - `min-debug-logs`: Disables all logs below `debug` level. //! - `min-trace-logs`: Disables all logs below `trace` level. -//! - `optimism`: Enables [OP-Stack](https://stack.optimism.io/) support for the node. Note that -//! this breaks compatibility with the Ethereum mainnet as a new deposit transaction type is -//! introduced as well as gas cost changes. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod cli; pub mod commands; -mod macros; /// Re-exported utils. pub mod utils { @@ -114,6 +111,11 @@ pub mod blockchain_tree { pub use reth_blockchain_tree::*; } +/// Re-exported from `reth_consensus`. +pub mod consensus { + pub use reth_consensus::*; +} + /// Re-exported from `reth_consensus_common`. pub mod consensus_common { pub use reth_consensus_common::*; @@ -144,7 +146,6 @@ pub mod transaction_pool { /// Re-export of `reth_rpc_*` crates. pub mod rpc { - /// Re-exported from `reth_rpc_builder`.
pub mod builder { pub use reth_rpc_builder::*; @@ -188,8 +189,9 @@ pub mod rpc { #[doc(inline)] pub use reth_cli_runner::{tokio_runtime, CliContext, CliRunner}; -#[cfg(all(feature = "jemalloc", unix))] -use tikv_jemallocator as _; - // for rendering diagrams use aquamarine as _; +#[cfg(feature = "bsc")] +use reth_evm_bsc as _; +#[cfg(feature = "bsc")] +use reth_node_bsc as _; diff --git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs deleted file mode 100644 index 2cc573338a..0000000000 --- a/bin/reth/src/macros.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Helper macros - -/// Creates the block executor type based on the configured feature. -/// -/// Note(mattsse): This is incredibly horrible and will be replaced -#[cfg(all(not(feature = "optimism"), not(feature = "bsc")))] -macro_rules! block_executor { - ($chain_spec:expr) => { - reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) - }; -} - -#[cfg(feature = "optimism")] -macro_rules! block_executor { - ($chain_spec:expr) => { - reth_node_optimism::OpExecutorProvider::optimism($chain_spec) - }; -} - -#[cfg(feature = "bsc")] -macro_rules! block_executor { - ($chain_spec:expr) => { - // In some cases provider is not available - // And we don't really need a bsc executor provider - reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) - }; - ($chain_spec:expr, $provider:expr) => { - reth_node_bsc::BscExecutorProvider::bsc($chain_spec, $provider) - }; -} - -pub(crate) use block_executor; diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index a26a461206..530b905050 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -1,33 +1,48 @@ #![allow(missing_docs)] -// We use jemalloc for performance reasons. -#[cfg(all(feature = "jemalloc", unix))] #[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); -#[cfg(all(feature = "optimism", not(test)))] -compile_error!("Cannot build the `reth` binary with the `optimism` feature flag enabled. Did you mean to build `op-reth`?"); - -/// clap [Args] for Engine related arguments. -use clap::Args; +use clap::{Args, Parser}; +use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; +use reth_node_builder::{ + engine_tree_config::{ + TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, + }, + EngineNodeLauncher, +}; +use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; +use reth_provider::providers::BlockchainProvider2; /// Parameters for configuring the engine -#[derive(Debug, Clone, Args, PartialEq, Eq, Default)] +#[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "Engine")] pub struct EngineArgs { /// Enable the engine2 experimental features on reth binary #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + + /// Configure persistence threshold for engine experimental. + #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + pub persistence_threshold: u64, + + /// Configure the target number of blocks to keep in memory. 
+ #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + pub memory_block_buffer_target: u64, } -#[cfg(all(not(feature = "optimism"), not(feature = "bsc")))] -fn main() { - use clap::Parser; - use reth::cli::Cli; - use reth_node_builder::EngineNodeLauncher; - use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; - use reth_provider::providers::BlockchainProvider2; +impl Default for EngineArgs { + fn default() -> Self { + Self { + experimental: false, + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + } + } +} +#[cfg(not(feature = "bsc"))] +fn main() { reth_cli_util::sigsegv_handler::install(); // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. @@ -35,30 +50,36 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = Cli::::parse().run(|builder, engine_args| async move { - let enable_engine2 = engine_args.experimental; - match enable_engine2 { - true => { - let handle = builder - .with_types_and_provider::>() - .with_components(EthereumNode::components()) - .with_add_ons::() - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - ); - builder.launch_with(launcher) - }) - .await?; - handle.node_exit_future.await + if let Err(err) = + Cli::::parse().run(|builder, engine_args| async move { + let enable_engine2 = engine_args.experimental; + match enable_engine2 { + true => { + let engine_tree_config = TreeConfig::default() + .with_persistence_threshold(engine_args.persistence_threshold) + .with_memory_block_buffer_target(engine_args.memory_block_buffer_target); + let handle = builder + .with_types_and_provider::>() + .with_components(EthereumNode::components()) + .with_add_ons::() + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + engine_tree_config, + ); + builder.launch_with(launcher) + }) + .await?; + handle.node_exit_future.await + } + false => { + let handle = builder.launch_node(EthereumNode::default()).await?; + handle.node_exit_future.await + } } - false => { - let handle = builder.launch_node(EthereumNode::default()).await?; - handle.node_exit_future.await - } - } - }) { + }) + { eprintln!("Error: {err:?}"); std::process::exit(1); } diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs deleted file mode 100644 index c3a2a667b6..0000000000 --- a/bin/reth/src/optimism.rs +++ /dev/null @@ -1,80 +0,0 @@ -#![allow(missing_docs, rustdoc::missing_crate_level_docs)] - -use clap::Parser; -use reth::cli::Cli; -use reth_node_builder::EngineNodeLauncher; -use reth_node_optimism::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; -use reth_optimism_rpc::eth::rpc::SequencerClient; -use reth_provider::providers::BlockchainProvider2; - -// We use jemalloc for performance reasons -#[cfg(all(feature = "jemalloc", unix))] -#[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - -#[cfg(not(feature = "optimism"))] -compile_error!("Cannot build the `op-reth` binary with the `optimism` feature flag disabled."); - -#[cfg(feature = "optimism")] -fn main() { - reth_cli_util::sigsegv_handler::install(); - - // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
diff --git a/bin/reth/src/optimism.rs deleted file mode 100644 index c3a2a667b6..0000000000 --- a/bin/reth/src/optimism.rs +++ /dev/null @@ -1,80 +0,0 @@ -#![allow(missing_docs, rustdoc::missing_crate_level_docs)] - -use clap::Parser; -use reth::cli::Cli; -use reth_node_builder::EngineNodeLauncher; -use reth_node_optimism::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; -use reth_optimism_rpc::eth::rpc::SequencerClient; -use reth_provider::providers::BlockchainProvider2; - -// We use jemalloc for performance reasons -#[cfg(all(feature = "jemalloc", unix))] -#[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - -#[cfg(not(feature = "optimism"))] -compile_error!("Cannot build the `op-reth` binary with the `optimism` feature flag disabled."); - -#[cfg(feature = "optimism")] -fn main() { - reth_cli_util::sigsegv_handler::install(); - - // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. - if std::env::var_os("RUST_BACKTRACE").is_none() { - std::env::set_var("RUST_BACKTRACE", "1"); - } - - if let Err(err) = Cli::<RollupArgs>::parse().run(|builder, rollup_args| async move { - let enable_engine2 = rollup_args.experimental; - let sequencer_http_arg = rollup_args.sequencer_http.clone(); - match enable_engine2 { - true => { - let handle = builder - .with_types_and_provider::<OptimismNode, BlockchainProvider2<_>>() - .with_components(OptimismNode::components(rollup_args)) - .with_add_ons::<OptimismAddOns>() - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http)); - } - - Ok(()) - }) - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - ); - builder.launch_with(launcher) - }) - .await?; - - handle.node_exit_future.await - } - false => { - let handle = builder - .node(OptimismNode::new(rollup_args.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http)); - } - - Ok(()) - }) - .launch() - .await?; - - handle.node_exit_future.await - } - } - }) { - eprintln!("Error: {err:?}"); - std::process::exit(1); - } -} diff --git a/book/cli/SUMMARY.md index 4cc86c7a36..5f338a0d1e 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -44,4 +44,3 @@ - [`reth recover`](./reth/recover.md) - [`reth recover storage-tries`](./reth/recover/storage-tries.md) - [`reth prune`](./reth/prune.md) - diff --git a/book/cli/help.py deleted file mode 100755 index 3f40a5e0b5..0000000000 --- a/book/cli/help.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import os -import re -import subprocess -import sys -from os import makedirs, path - -HELP_KEY = "help" -SECTION_START = "<!-- CLI_REFERENCE START -->" -SECTION_END = "<!-- CLI_REFERENCE END -->" -SECTION_RE = rf"\s*{SECTION_START}.*?{SECTION_END}" - -README = """\ -# CLI Reference - - - -Automatically-generated CLI reference from `--help` output. - -{{#include ./SUMMARY.md}} -""" - - -def write_file(file_path, content): - content = "\n".join([line.rstrip() for line in content.split("\n")]) - with open(file_path, "w") as f: - f.write(content) - - -def main(): - args = parse_args(sys.argv[1:]) - for cmd in args.commands: - if cmd.find(" ") >= 0: - raise Exception(f"subcommands are not allowed: {cmd}") - makedirs(args.out_dir, exist_ok=True) - - output = {} - - # Iterate over all commands and their subcommands. - cmd_iter = [[cmd] for cmd in args.commands] - for cmd in cmd_iter: - subcmds, stdout = get_entry(cmd) - if args.verbose and len(subcmds) > 0: - eprint(f"Found subcommands for \"{' '.join(cmd)}\": {subcmds}") - - # Add entry to output map, e.g. `output["cmd"]["subcmd"]["help"] = "..."`. - e = output - for arg in cmd: - tmp = e.get(arg) - if not tmp: - e[arg] = {} - tmp = e[arg] - e = tmp - e[HELP_KEY] = stdout - - # Append subcommands. - for subcmd in subcmds: - cmd_iter.append(cmd + [subcmd]) - - # Generate markdown files.
- summary = "" - root_summary = "" - for cmd, obj in output.items(): - cmd_markdown(args.out_dir, cmd, obj) - - root_path = path.relpath(args.out_dir, args.root_dir) - summary += cmd_summary("", cmd, obj, 0) - summary += "\n" - - root_summary += cmd_summary(root_path, cmd, obj, args.root_indentation) - root_summary += "\n" - write_file(path.join(args.out_dir, "SUMMARY.md"), summary) - - # Generate README.md. - if args.readme: - write_file(path.join(args.out_dir, "README.md"), README) - - if args.root_summary: - update_root_summary(args.root_dir, root_summary) - - -def parse_args(args: list[str]): - """Parses command line arguments.""" - parser = argparse.ArgumentParser( - description="Generate markdown files from help output of commands" - ) - parser.add_argument("--root-dir", default=".", help="Root directory") - parser.add_argument( - "--root-indentation", - default=0, - type=int, - help="Indentation for the root SUMMARY.md file", - ) - parser.add_argument("--out-dir", help="Output directory") - parser.add_argument( - "--readme", - action="store_true", - help="Whether to add a README.md file", - ) - parser.add_argument( - "--root-summary", - action="store_true", - help="Whether to update the root SUMMARY.md file", - ) - parser.add_argument( - "commands", - nargs="+", - help="Command to generate markdown for. Can be a subcommand.", - ) - parser.add_argument( - "--verbose", "-v", action="store_true", help="Print verbose output" - ) - return parser.parse_known_args(args)[0] - - -def get_entry(cmd: list[str]): - """Returns the subcommands and help output for a command.""" - env = os.environ.copy() - env["NO_COLOR"] = "1" - env["COLUMNS"] = "100" - env["LINES"] = "10000" - output = subprocess.run(cmd + ["--help"], capture_output=True, env=env) - if output.returncode != 0: - stderr = output.stderr.decode("utf-8") - raise Exception(f"Command \"{' '.join(cmd)}\" failed:\n{stderr}") - stdout = output.stdout.decode("utf-8") - subcmds = parse_sub_commands(stdout) - return subcmds, stdout - - -def parse_sub_commands(s: str): - """Returns a list of subcommands from the help output of a command.""" - idx = s.find("Commands:") - if idx < 0: - return [] - s = s[idx:] - - idx = s.find("Options:") - if idx < 0: - return [] - s = s[:idx] - - idx = s.find("Arguments:") - if idx >= 0: - s = s[:idx] - - subcmds = s.splitlines()[1:] - subcmds = filter( - lambda x: x.strip() != "" and x.startswith(" ") and x[2] != " ", subcmds - ) - subcmds = map(lambda x: x.strip().split(" ")[0], subcmds) - subcmds = filter(lambda x: x != "help", subcmds) - return list(subcmds) - - -def cmd_markdown(out_dir: str, cmd: str, obj: object): - """Writes the markdown for a command and its subcommands to out_dir.""" - - def rec(cmd: list[str], obj: object): - out = "" - out += f"# {' '.join(cmd)}\n\n" - out += help_markdown(cmd, obj[HELP_KEY]) - out_path = out_dir - for arg in cmd: - out_path = path.join(out_path, arg) - makedirs(path.dirname(out_path), exist_ok=True) - write_file(f"{out_path}.md", out) - - for k, v in obj.items(): - if k == HELP_KEY: - continue - rec(cmd + [k], v) - - rec([command_name(cmd)], obj) - - -def help_markdown(cmd: list[str], s: str): - """Returns the markdown for a command's help output.""" - cmd[0] = command_name(cmd[0]) - description, s = parse_description(s) - return f"""\ -{description} - -```bash -$ {' '.join(cmd)} --help -{preprocess_help(s.strip())} -```""" - - -def parse_description(s: str): - """Splits the help output into a description and the rest.""" - idx = s.find("Usage:") - if idx < 0: - 
return "", s - return s[:idx].strip().splitlines()[0].strip(), s[idx:] - - -def cmd_summary(md_root: str, cmd: str, obj: object, indent: int): - """Returns the summary for a command and its subcommands.""" - - def rec(cmd: list[str], obj: object, indent: int): - nonlocal out - cmd_s = " ".join(cmd) - cmd_path = cmd_s.replace(" ", "/") - if md_root != "": - cmd_path = f"{md_root}/{cmd_path}" - out += f"{' ' * indent}- [`{cmd_s}`](./{cmd_path}.md)\n" - - for k, v in obj.items(): - if k == HELP_KEY: - continue - rec(cmd + [k], v, indent + 2) - - out = "" - rec([command_name(cmd)], obj, indent) - return out - - -def update_root_summary(root_dir: str, root_summary: str): - """Replaces the CLI_REFERENCE section in the root SUMMARY.md file.""" - summary_file = path.join(root_dir, "SUMMARY.md") - - with open(summary_file, "r") as f: - real_root_summary = f.read() - - if not re.search(SECTION_RE, real_root_summary, flags=re.DOTALL): - raise Exception( - f"Could not find CLI_REFERENCE section in {summary_file}. " - "Please add the following section to the file:\n" - f"{SECTION_START}\n{SECTION_END}" - ) - - last_line = re.findall(f".*{SECTION_END}", real_root_summary)[0] - root_summary_s = root_summary.rstrip().replace("\n\n", "\n") - replace_with = f" {SECTION_START}\n{root_summary_s}\n{last_line}" - - real_root_summary = re.sub( - SECTION_RE, replace_with, real_root_summary, flags=re.DOTALL - ) - root_summary_file = path.join(root_dir, "SUMMARY.md") - with open(root_summary_file, "w") as f: - f.write(real_root_summary) - - -def eprint(*args, **kwargs): - """Prints to stderr.""" - print(*args, file=sys.stderr, **kwargs) - - -def command_name(cmd: str): - """Returns the name of a command.""" - return cmd.split("/")[-1] - - -def preprocess_help(s: str): - """Preprocesses the help output of a command.""" - # Remove the user-specific paths. - s = re.sub( - r"default: /.*/reth", - "default: ", - s, - ) - # Remove the commit SHA and target architecture triple or fourth - # rustup available targets: - # aarch64-apple-darwin - # x86_64-unknown-linux-gnu - # x86_64-pc-windows-gnu - s = re.sub( - r"default: reth/.*-[0-9A-Fa-f]{6,10}/([_\w]+)-(\w+)-(\w+)(-\w+)?", - "default: reth/-/", - s, - ) - # Remove the OS - s = re.sub( - r"default: reth/.*/\w+", - "default: reth//", - s, - ) - - # Remove rpc.max-tracing-requests default value - s = re.sub( - r"(rpc.max-tracing-requests \n.*\n.*\n.*)\[default: \d+\]", - r"\1[default: ]", - s, - flags=re.MULTILINE, - ) - - return s - - -if __name__ == "__main__": - main() diff --git a/book/cli/help.rs b/book/cli/help.rs new file mode 100755 index 0000000000..e347e1ea5d --- /dev/null +++ b/book/cli/help.rs @@ -0,0 +1,374 @@ +#!/usr/bin/env -S cargo +nightly -Zscript +--- +[package] +edition = "2021" + +[dependencies] +clap = { version = "4", features = ["derive"] } +pathdiff = "0.2" +regex = "1" +--- +use clap::Parser; +use regex::Regex; +use std::borrow::Cow; +use std::fs::{self, File}; +use std::io::{self, Write}; +use std::iter::once; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; +use std::str; +use std::sync::LazyLock; +use std::{fmt, process}; + +const SECTION_START: &str = ""; +const SECTION_END: &str = ""; +const README: &str = r#"# CLI Reference + + + +Automatically-generated CLI reference from `--help` output. + +{{#include ./SUMMARY.md}} +"#; +const TRIM_LINE_END_MARKDOWN: bool = true; + +/// Lazy static regex to avoid recompiling the same regex pattern multiple times. +macro_rules! 
regex { + ($re:expr) => {{ + static RE: LazyLock<Regex> = + LazyLock::new(|| Regex::new($re).expect("Failed to compile regex pattern")); + &*RE + }}; +} + +/// Generate markdown files from help output of commands +#[derive(Parser, Debug)] +#[command(about, long_about = None)] +struct Args { + /// Root directory + #[arg(long, default_value_t = String::from("."))] + root_dir: String, + + /// Indentation for the root SUMMARY.md file + #[arg(long, default_value_t = 2)] + root_indentation: usize, + + /// Output directory + #[arg(long)] + out_dir: PathBuf, + + /// Whether to add a README.md file + #[arg(long)] + readme: bool, + + /// Whether to update the root SUMMARY.md file + #[arg(long)] + root_summary: bool, + + /// Print verbose output + #[arg(short, long)] + verbose: bool, + + /// Commands to generate markdown for. + #[arg(required = true, num_args = 1..)] + commands: Vec<PathBuf>, +} + +fn write_file(file_path: &Path, content: &str) -> io::Result<()> { + let content = if TRIM_LINE_END_MARKDOWN { + content + .lines() + .map(|line| line.trim_end()) + .collect::<Vec<_>>() + .join("\n") + } else { + content.to_string() + }; + fs::write(file_path, content) +} + +fn main() -> io::Result<()> { + let args = Args::parse(); + debug_assert!(args.commands.len() >= 1); + + let out_dir = args.out_dir; + fs::create_dir_all(&out_dir)?; + + let mut todo_iter: Vec<Cmd> = args + .commands + .iter() + .rev() // reverse to keep the order (pop) + .map(Cmd::new) + .collect(); + let mut output = Vec::new(); + + // Iterate over all commands and their subcommands. + while let Some(cmd) = todo_iter.pop() { + let (new_subcmds, stdout) = get_entry(&cmd)?; + if args.verbose && !new_subcmds.is_empty() { + println!( + "Found subcommands for \"{}\": {:?}", + cmd.command_name(), + new_subcmds + ); + } + // Add new subcommands to todo_iter (so that they are processed in the correct order). + for subcmd in new_subcmds.into_iter().rev() { + let new_subcmds: Vec<_> = cmd + .subcommands + .iter() + .cloned() + .chain(once(subcmd)) + .collect(); + + todo_iter.push(Cmd { + cmd: cmd.cmd, + subcommands: new_subcmds, + }); + } + output.push((cmd, stdout)); + } + + // Generate markdown files. + for (cmd, stdout) in &output { + cmd_markdown(&out_dir, cmd, stdout)?; + } + + // Generate SUMMARY.md. + let summary: String = output + .iter() + .map(|(cmd, _)| cmd_summary(None, cmd, 0)) + .chain(once("\n".to_string())) + .collect(); + + write_file(&out_dir.clone().join("SUMMARY.md"), &summary)?; + + // Generate README.md. + if args.readme { + let path = &out_dir.join("README.md"); + if args.verbose { + println!("Writing README.md to \"{}\"", path.to_string_lossy()); + } + write_file(path, README)?; + } + + // Generate root SUMMARY.md. + if args.root_summary { + let root_summary: String = output + .iter() + .map(|(cmd, _)| { + let root_path = pathdiff::diff_paths(&out_dir, &args.root_dir); + cmd_summary(root_path, cmd, args.root_indentation) + }) + .collect(); + + let path = Path::new(args.root_dir.as_str()); + if args.verbose { + println!("Updating root summary in \"{}\"", path.to_string_lossy()); + } + update_root_summary(path, &root_summary)?; + } + + Ok(()) +}
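For readers comparing this with the deleted Python script: the `regex!` macro defined at the top of this file gives each call site its own `Regex` compiled once per process, standing in for Python's module-level `re` patterns. A small usage sketch, assuming the macro is in scope in the same script (`first_word` is a hypothetical helper, not part of this diff):

```rust
/// Extract the first non-whitespace token of a line; the pattern is
/// compiled once per call site rather than once per call.
fn first_word(line: &str) -> Option<&str> {
    let re = regex!(r"^\s*(\S+)");
    re.captures(line).and_then(|cap| cap.get(1)).map(|m| m.as_str())
}
```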
+/// Returns the subcommands and help output for a command. +fn get_entry(cmd: &Cmd) -> io::Result<(Vec<String>, String)> { + let output = Command::new(cmd.cmd) + .args(&cmd.subcommands) + .arg("--help") + .env("NO_COLOR", "1") + .env("COLUMNS", "100") + .env("LINES", "10000") + .stdout(Stdio::piped()) + .output()?; + + if !output.status.success() { + let stderr = str::from_utf8(&output.stderr).unwrap_or("Failed to parse stderr as UTF-8"); + return Err(io::Error::new( + io::ErrorKind::Other, + format!("Command \"{}\" failed:\n{}", cmd, stderr), + )); + } + + let stdout = str::from_utf8(&output.stdout) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))? + .to_string(); + + // Parse subcommands from the help output + let subcmds = parse_sub_commands(&stdout); + + Ok((subcmds, stdout)) +} + +/// Returns a list of subcommands from the help output of a command. +fn parse_sub_commands(s: &str) -> Vec<String> { + // This regex matches lines starting with two spaces, followed by the subcommand name. + let re = regex!(r"^  (\S+)"); + + s.split("Commands:") + .nth(1) // Get the part after "Commands:" + .map(|commands_section| { + commands_section + .lines() + .take_while(|line| !line.starts_with("Options:") && !line.starts_with("Arguments:")) + .filter_map(|line| { + re.captures(line) + .and_then(|cap| cap.get(1).map(|m| m.as_str().to_string())) + }) + .filter(|cmd| cmd != "help") + .map(String::from) + .collect() + }) + .unwrap_or_default() // Return an empty Vec if "Commands:" was not found +} + +/// Writes the markdown for a command to out_dir. +fn cmd_markdown(out_dir: &Path, cmd: &Cmd, stdout: &str) -> io::Result<()> { + let out = format!("# {}\n\n{}", cmd, help_markdown(cmd, stdout)); + + let out_path = out_dir.join(cmd.to_string().replace(" ", "/")); + fs::create_dir_all(out_path.parent().unwrap())?; + write_file(&out_path.with_extension("md"), &out)?; + + Ok(()) +} + +/// Returns the markdown for a command's help output. +fn help_markdown(cmd: &Cmd, stdout: &str) -> String { + let (description, s) = parse_description(stdout); + format!( + "{}\n\n```bash\n$ {} --help\n```\n```txt\n{}\n```", + description, + cmd, + preprocess_help(s.trim()) + ) +} + +/// Splits the help output into a description and the rest. +fn parse_description(s: &str) -> (&str, &str) { + match s.find("Usage:") { + Some(idx) => { + let description = s[..idx].trim().lines().next().unwrap_or(""); + (description, &s[idx..]) + } + None => ("", s), + } +} + +/// Returns the summary for a command and its subcommands. +fn cmd_summary(md_root: Option<PathBuf>, cmd: &Cmd, indent: usize) -> String { + let cmd_s = cmd.to_string(); + let cmd_path = cmd_s.replace(" ", "/"); + let full_cmd_path = match md_root { + None => cmd_path, + Some(md_root) => format!("{}/{}", md_root.to_string_lossy(), cmd_path), + }; + let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2)); + format!("{}- [`{}`](./{}.md)\n", indent_string, cmd_s, full_cmd_path) +} + +/// Replaces the CLI_REFERENCE section in the root SUMMARY.md file. +fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { + let summary_file = root_dir.join("SUMMARY.md"); + let original_summary_content = fs::read_to_string(&summary_file)?; + + let section_re = regex!(&format!(r"(?s)\s*{SECTION_START}.*?{SECTION_END}")); + if !section_re.is_match(&original_summary_content) { + eprintln!( + "Could not find CLI_REFERENCE section in {}. Please add the following section to the file:\n{}\n...
 CLI Reference goes here ...\n\n{}", + summary_file.display(), + SECTION_START, + SECTION_END + ); + process::exit(1); + } + + let section_end_re = regex!(&format!(r".*{SECTION_END}")); + let last_line = section_end_re + .find(&original_summary_content) + .map(|m| m.as_str().to_string()) + .expect("Could not extract last line of CLI_REFERENCE section"); + + let root_summary_s = root_summary.trim_end().replace("\n\n", "\n"); + let replace_with = format!(" {}\n{}\n{}", SECTION_START, root_summary_s, last_line); + + let new_root_summary = section_re + .replace(&original_summary_content, replace_with.as_str()) + .to_string(); + + let mut root_summary_file = File::create(&summary_file)?; + root_summary_file.write_all(new_root_summary.as_bytes()) +} + +/// Preprocesses the help output of a command. +fn preprocess_help(s: &str) -> Cow<'_, str> { + static REPLACEMENTS: LazyLock<Vec<(Regex, &'static str)>> = LazyLock::new(|| { + let patterns: &[(&str, &str)] = &[ + // Remove the user-specific paths. + (r"default: /.*/reth", "default: <CACHE_DIR>"), + // Remove the commit SHA and target architecture triple or fourth + // rustup available targets: + // aarch64-apple-darwin + // x86_64-unknown-linux-gnu + // x86_64-pc-windows-gnu + ( + r"default: reth/.*-[0-9A-Fa-f]{6,10}/([_\w]+)-(\w+)-(\w+)(-\w+)?", + "default: reth/<VERSION>-<SHA>/<ARCH>", + ), + // Remove the OS + (r"default: reth/.*/\w+", "default: reth/<VERSION>/<OS>"), + // Remove rpc.max-tracing-requests default value + ( + r"(rpc.max-tracing-requests <COUNT>\n.*\n.*\n.*)\[default: \d+\]", + r"$1[default: <NUM>]", + ), + ]; + patterns + .iter() + .map(|&(re, replace_with)| (Regex::new(re).expect(re), replace_with)) + .collect() + }); + + let mut s = Cow::Borrowed(s); + for (re, replacement) in REPLACEMENTS.iter() { + if let Cow::Owned(result) = re.replace_all(&s, *replacement) { + s = Cow::Owned(result); + } + } + s +} + +#[derive(Hash, Debug, PartialEq, Eq)] +struct Cmd<'a> { + /// path to binary (e.g. ./target/debug/reth) + cmd: &'a Path, + /// subcommands (e.g. [db, stats]) + subcommands: Vec<String>, +} + +impl<'a> Cmd<'a> { + fn command_name(&self) -> &str { + self.cmd + .file_name() + .and_then(|os_str| os_str.to_str()) + .expect("Expect valid command") + } + + fn new(cmd: &'a PathBuf) -> Self { + Self { + cmd, + subcommands: Vec::new(), + } + } +} + +impl<'a> fmt::Display for Cmd<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.command_name())?; + if !self.subcommands.is_empty() { + write!(f, " {}", self.subcommands.join(" "))?; + } + Ok(()) + } +} diff --git a/book/cli/reth.md index cebeb44e23..70a1dec4da 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -4,6 +4,8 @@ Reth ```bash $ reth --help +``` +```txt Usage: reth [OPTIONS] <COMMAND> Commands: diff --git a/book/cli/reth/config.md index df0d261b07..ebf76fae37 100644 --- a/book/cli/reth/config.md +++ b/book/cli/reth/config.md @@ -4,6 +4,8 @@ Write config to stdout ```bash $ reth config --help +``` +```txt Usage: reth config [OPTIONS] Options: diff --git a/book/cli/reth/db.md index b867134a9d..9e3b32cc0b 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -4,6 +4,8 @@ Database debugging utilities ```bash $ reth db --help +``` +```txt Usage: reth db [OPTIONS] <COMMAND> Commands: @@ -45,7 +47,7 @@ Datadir: [default: default] - --datadir.static_files <PATH> + --datadir.static-files <PATH> The absolute path to store static files in.
--config diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md index 407630ed41..a422454fd0 100644 --- a/book/cli/reth/db/checksum.md +++ b/book/cli/reth/db/checksum.md @@ -4,6 +4,8 @@ Calculates the content checksum of a table ```bash $ reth db checksum --help +``` +```txt Usage: reth db checksum [OPTIONS] Arguments: diff --git a/book/cli/reth/db/clear.md b/book/cli/reth/db/clear.md index 7abc54623b..554499a36d 100644 --- a/book/cli/reth/db/clear.md +++ b/book/cli/reth/db/clear.md @@ -4,6 +4,8 @@ Deletes all table entries ```bash $ reth db clear --help +``` +```txt Usage: reth db clear [OPTIONS] Commands: diff --git a/book/cli/reth/db/clear/mdbx.md b/book/cli/reth/db/clear/mdbx.md index ad8ad761e8..50f3731467 100644 --- a/book/cli/reth/db/clear/mdbx.md +++ b/book/cli/reth/db/clear/mdbx.md @@ -4,6 +4,8 @@ Deletes all database table entries ```bash $ reth db clear mdbx --help +``` +```txt Usage: reth db clear mdbx [OPTIONS]
Arguments: diff --git a/book/cli/reth/db/clear/static-file.md b/book/cli/reth/db/clear/static-file.md index 9d1d25fa6e..c830af259c 100644 --- a/book/cli/reth/db/clear/static-file.md +++ b/book/cli/reth/db/clear/static-file.md @@ -4,6 +4,8 @@ Deletes all static file segment entries ```bash $ reth db clear static-file --help +``` +```txt Usage: reth db clear static-file [OPTIONS] Arguments: diff --git a/book/cli/reth/db/clear/static_file.md b/book/cli/reth/db/clear/static_file.md deleted file mode 100644 index 363da79674..0000000000 --- a/book/cli/reth/db/clear/static_file.md +++ /dev/null @@ -1,127 +0,0 @@ -# reth db clear static-file - -Deletes all static_file segment entries - -```bash -$ reth db clear static-file --help -Usage: reth db clear static-file [OPTIONS] - -Arguments: - - Possible values: - - headers: StaticFile segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - - transactions: StaticFile segment responsible for the `Transactions` table - - receipts: StaticFile segment responsible for the `Receipts` table - -Options: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, goerli, holesky, dev - - [default: mainnet] - - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - - [default: 1] - - -h, --help - Print help (see a summary with '-h') - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index 88266901bb..ea4c29612f 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -4,6 +4,8 @@ Create a diff between two database tables or two entire databases ```bash $ reth db diff --help +``` +```txt Usage: reth db diff [OPTIONS] --secondary-datadir --output Options: diff --git a/book/cli/reth/db/drop.md b/book/cli/reth/db/drop.md index 4a8089f85a..c23f6d93c5 100644 --- a/book/cli/reth/db/drop.md +++ b/book/cli/reth/db/drop.md @@ -4,6 +4,8 @@ Deletes all database entries ```bash $ reth db drop --help +``` +```txt Usage: reth db drop [OPTIONS] Options: diff --git a/book/cli/reth/db/get.md b/book/cli/reth/db/get.md index 94081668ad..266e46e5ca 100644 --- a/book/cli/reth/db/get.md +++ b/book/cli/reth/db/get.md @@ -4,6 +4,8 @@ Gets the content of a table for the given key ```bash $ reth db get --help +``` +```txt Usage: reth db get [OPTIONS] Commands: diff --git a/book/cli/reth/db/get/mdbx.md b/book/cli/reth/db/get/mdbx.md index 0921b79bf6..18fa76edaa 100644 --- a/book/cli/reth/db/get/mdbx.md +++ b/book/cli/reth/db/get/mdbx.md @@ -4,6 +4,8 @@ Gets the content of a database table for the given key ```bash $ reth db get mdbx --help +``` +```txt Usage: reth db get mdbx [OPTIONS]
[SUBKEY] Arguments: diff --git a/book/cli/reth/db/get/static-file.md b/book/cli/reth/db/get/static-file.md index 672df21594..a50da0c0e4 100644 --- a/book/cli/reth/db/get/static-file.md +++ b/book/cli/reth/db/get/static-file.md @@ -4,6 +4,8 @@ Gets the content of a static file segment for the given key ```bash $ reth db get static-file --help +``` +```txt Usage: reth db get static-file [OPTIONS] Arguments: diff --git a/book/cli/reth/db/get/static_file.md b/book/cli/reth/db/get/static_file.md deleted file mode 100644 index 51071116c7..0000000000 --- a/book/cli/reth/db/get/static_file.md +++ /dev/null @@ -1,133 +0,0 @@ -# reth db get static-file - -Gets the content of a static_file segment for the given key - -```bash -$ reth db get static-file --help -Usage: reth db get static-file [OPTIONS] - -Arguments: - - Possible values: - - headers: StaticFile segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - - transactions: StaticFile segment responsible for the `Transactions` table - - receipts: StaticFile segment responsible for the `Receipts` table - - - The key to get content for - -Options: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --raw - Output bytes instead of human-readable decoded value - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, goerli, holesky, dev - - [default: mainnet] - - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - - [default: 1] - - -h, --help - Print help (see a summary with '-h') - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/reth/db/list.md b/book/cli/reth/db/list.md index 68bc1d9c27..9b64a70a07 100644 --- a/book/cli/reth/db/list.md +++ b/book/cli/reth/db/list.md @@ -4,6 +4,8 @@ Lists the contents of a table ```bash $ reth db list --help +``` +```txt Usage: reth db list [OPTIONS]
Arguments: diff --git a/book/cli/reth/db/path.md b/book/cli/reth/db/path.md index 591d8e150b..a711acad0d 100644 --- a/book/cli/reth/db/path.md +++ b/book/cli/reth/db/path.md @@ -4,6 +4,8 @@ Returns the full database path ```bash $ reth db path --help +``` +```txt Usage: reth db path [OPTIONS] Options: diff --git a/book/cli/reth/db/static_file.md b/book/cli/reth/db/static_file.md deleted file mode 100644 index 16246c8d47..0000000000 --- a/book/cli/reth/db/static_file.md +++ /dev/null @@ -1,174 +0,0 @@ -# reth db static-file - -StaticFiles tables from database - -```bash -$ reth db static-file --help -Usage: reth db static-file [OPTIONS] [SEGMENTS]... - -Arguments: - [SEGMENTS]... - StaticFile segments to generate - - Possible values: - - headers: StaticFile segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - - transactions: StaticFile segment responsible for the `Transactions` table - - receipts: StaticFile segment responsible for the `Receipts` table - -Options: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - -f, --from - Starting block for the static_file - - [default: 0] - - -b, --block-interval - Number of blocks in the static_file - - [default: 500000] - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, goerli, holesky, dev - - [default: mainnet] - - -p, --parallel - Sets the number of static files built in parallel. Note: Each parallel build is memory-intensive - - [default: 1] - - --only-stats - Flag to skip static_file creation and print static_file files stats - - --bench - Flag to enable database-to-static_file benchmarking - - --only-bench - Flag to skip static_file creation and only run benchmarks on existing static files - - -c, --compression - Compression algorithms to use - - [default: uncompressed] - - Possible values: - - lz4: LZ4 compression algorithm - - zstd: Zstandard (Zstd) compression algorithm - - zstd-with-dictionary: Zstandard (Zstd) compression algorithm with a dictionary - - uncompressed: No compression, uncompressed static_file - - --with-filters - Flag to enable inclusion list filters and PHFs - - --phf - Specifies the perfect hashing function to use - - Possible values: - - fmph: Fingerprint-Based Minimal Perfect Hash Function - - go-fmph: Fingerprint-Based Minimal Perfect Hash Function with Group Optimization - - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - - [default: 1] - - -h, --help - Print help (see a summary with '-h') - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index 76b6c90ae9..2ed087ae1c 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -4,6 +4,8 @@ Lists all the tables, their entry count and their size ```bash $ reth db stats --help +``` +```txt Usage: reth db stats [OPTIONS] Options: diff --git a/book/cli/reth/db/version.md b/book/cli/reth/db/version.md index 131dd6dc23..549f970917 100644 --- a/book/cli/reth/db/version.md +++ b/book/cli/reth/db/version.md @@ -4,6 +4,8 @@ Lists current and local database versions ```bash $ reth db version --help +``` +```txt Usage: reth db version [OPTIONS] Options: diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index d61094834d..ab016d631d 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -4,6 +4,8 @@ Various debug routines ```bash $ reth debug --help +``` +```txt Usage: reth debug [OPTIONS] Commands: diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index e210303893..76ddac306c 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -4,6 +4,8 @@ Debug block building ```bash $ reth debug build-block --help +``` +```txt Usage: reth debug build-block [OPTIONS] --prev-randao --timestamp --suggested-fee-recipient Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. 
--config diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index a6475535e8..93864e2d20 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -4,6 +4,8 @@ Debug the roundtrip execution of blocks as well as the generated data ```bash $ reth debug execution --help +``` +```txt Usage: reth debug execution [OPTIONS] --to Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -221,6 +223,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + --to The maximum block height diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 2c9b4b4c67..333048bde6 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -4,6 +4,8 @@ Debug in-memory state root calculation ```bash $ reth debug in-memory-merkle --help +``` +```txt Usage: reth debug in-memory-merkle [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -221,6 +223,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + --retries The number of retries per request diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index be683bca20..4249d3d08c 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -4,6 +4,8 @@ Debug the clean & incremental state root calculations ```bash $ reth debug merkle --help +``` +```txt Usage: reth debug merkle [OPTIONS] --to Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -221,6 +223,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + --retries The number of retries per request diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index bf9b63896b..6fe2434af0 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -4,6 +4,8 @@ Debug engine API by replaying stored messages ```bash $ reth debug replay-engine --help +``` +```txt Usage: reth debug replay-engine [OPTIONS] --engine-api-store Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -221,6 +223,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. 
+ + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + --engine-api-store The path to read engine API messages from diff --git a/book/cli/reth/dump-genesis.md b/book/cli/reth/dump-genesis.md index 7197be305f..0a45ffb328 100644 --- a/book/cli/reth/dump-genesis.md +++ b/book/cli/reth/dump-genesis.md @@ -4,6 +4,8 @@ Dumps genesis block JSON configuration to stdout ```bash $ reth dump-genesis --help +``` +```txt Usage: reth dump-genesis [OPTIONS] Options: diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 29a67f1817..7bd8a0079e 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -4,6 +4,8 @@ This syncs RLP encoded blocks from a file ```bash $ reth import --help +``` +```txt Usage: reth import [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index d947baec37..cb221634c4 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -4,6 +4,8 @@ Initialize the database from a state dump file ```bash $ reth init-state --help +``` +```txt Usage: reth init-state [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index 5eb9d4d03b..cc889e5e35 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -4,6 +4,8 @@ Initialize the database from a genesis file ```bash $ reth init --help +``` +```txt Usage: reth init [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 9217dc14fa..3ae086d38f 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -4,6 +4,8 @@ Start the node ```bash $ reth node --help +``` +```txt Usage: reth node [OPTIONS] Options: @@ -56,7 +58,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. Networking: @@ -213,6 +215,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + RPC: --http Enable the HTTP-RPC server @@ -335,6 +347,11 @@ RPC: [default: 50000000] + --rpc.max-simulate-blocks + Maximum number of blocks for `eth_simulateV1` call + + [default: 256] + --rpc.eth-proof-window The maximum proof window for historical proof generation. This value allows for generating historical proofs up to configured number of blocks from current tip (up to `tip - window`) @@ -428,6 +445,16 @@ TxPool: [default: 10] + --txpool.minimal-protocol-fee + Minimum base fee required by the protocol + + [default: 7] + + --txpool.gas-limit + The default enforced gas limit for transactions entering the pool + + [default: 30000000] + --blobpool.pricebump Price bump percentage to replace an already existing blob transaction @@ -528,6 +555,17 @@ Debug: --debug.engine-api-store The path to store engine API messages at. 
If specified, all of the intercepted engine API messages will be written to specified location + --debug.invalid-block-hook + Determines which type of invalid block hook to install + + Example: `witness,prestate` + + [default: witness] + [possible values: witness, pre-state, opcode] + + --debug.healthy-node-rpc-url + The RPC URL of a healthy node to use for comparing invalid block hook results against. + Database: --db.log-level Database logging level. Levels higher than "notice" require a debug build @@ -568,12 +606,75 @@ Dev testnet: Pruning: --full - Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored. This flag takes priority over pruning configuration in reth.toml + Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored + + --block-interval + Minimum pruning interval measured in blocks + + [default: 0] + + --prune.senderrecovery.full + Prunes all sender recovery data + + --prune.senderrecovery.distance + Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.senderrecovery.before + Prune sender recovery data before the specified block number. The specified block number is not pruned + + --prune.transactionlookup.full + Prunes all transaction lookup data + + --prune.transactionlookup.distance + Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.transactionlookup.before + Prune transaction lookup data before the specified block number. The specified block number is not pruned + + --prune.receipts.full + Prunes all receipt data + + --prune.receipts.distance + Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.receipts.before + Prune receipts before the specified block number. The specified block number is not pruned + + --prune.accounthistory.full + Prunes all account history + + --prune.accounthistory.distance + Prune account before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.accounthistory.before + Prune account history before the specified block number. The specified block number is not pruned + + --prune.storagehistory.full + Prunes all storage history data + + --prune.storagehistory.distance + Prune storage history before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.storagehistory.before + Prune storage history before the specified block number. The specified block number is not pruned + + --prune.receiptslogfilter + Configure receipts log filter. Format: <`address`>:<`prune_mode`>[,<`address`>:<`prune_mode`>...] Where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' Engine: --engine.experimental Enable the engine2 experimental features on reth binary + --engine.persistence-threshold + Configure persistence threshold for engine experimental + + [default: 2] + + --engine.memory-block-buffer-target + Configure the target number of blocks to keep in memory + + [default: 2] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 42d6c8415b..c3ab3f103f 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -4,6 +4,8 @@ P2P Debugging utilities ```bash $ reth p2p --help +``` +```txt Usage: reth p2p [OPTIONS] Commands: @@ -198,6 +200,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. 
+ + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -210,7 +222,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. Database: diff --git a/book/cli/reth/p2p/body.md b/book/cli/reth/p2p/body.md index 3b6c6b1622..cb1a96d63d 100644 --- a/book/cli/reth/p2p/body.md +++ b/book/cli/reth/p2p/body.md @@ -4,6 +4,8 @@ Download block body ```bash $ reth p2p body --help +``` +```txt Usage: reth p2p body [OPTIONS] Arguments: diff --git a/book/cli/reth/p2p/header.md b/book/cli/reth/p2p/header.md index c00b81ddbf..69557c523f 100644 --- a/book/cli/reth/p2p/header.md +++ b/book/cli/reth/p2p/header.md @@ -4,6 +4,8 @@ Download block header ```bash $ reth p2p header --help +``` +```txt Usage: reth p2p header [OPTIONS] Arguments: diff --git a/book/cli/reth/p2p/rlpx.md b/book/cli/reth/p2p/rlpx.md index dd73e437aa..71a164c643 100644 --- a/book/cli/reth/p2p/rlpx.md +++ b/book/cli/reth/p2p/rlpx.md @@ -4,6 +4,8 @@ RLPx commands ```bash $ reth p2p rlpx --help +``` +```txt Usage: reth p2p rlpx [OPTIONS] Commands: diff --git a/book/cli/reth/p2p/rlpx/ping.md b/book/cli/reth/p2p/rlpx/ping.md index 222b57735b..a7cef4bd33 100644 --- a/book/cli/reth/p2p/rlpx/ping.md +++ b/book/cli/reth/p2p/rlpx/ping.md @@ -4,6 +4,8 @@ ping node ```bash $ reth p2p rlpx ping --help +``` +```txt Usage: reth p2p rlpx ping [OPTIONS] Arguments: diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index 0b3e701f6b..e0641256f1 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -4,6 +4,8 @@ Prune according to the configuration without any limits ```bash $ reth prune --help +``` +```txt Usage: reth prune [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/recover.md b/book/cli/reth/recover.md index 4fe28211db..dcb9c3c73c 100644 --- a/book/cli/reth/recover.md +++ b/book/cli/reth/recover.md @@ -4,6 +4,8 @@ Scripts for node recovery ```bash $ reth recover --help +``` +```txt Usage: reth recover [OPTIONS] Commands: diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index d5df358a71..1f639cb095 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -4,6 +4,8 @@ Recover the node by deleting dangling storage tries ```bash $ reth recover storage-tries --help +``` +```txt Usage: reth recover storage-tries [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. 
--config diff --git a/book/cli/reth/stage.md b/book/cli/reth/stage.md index c9ff302c1a..928ee0639c 100644 --- a/book/cli/reth/stage.md +++ b/book/cli/reth/stage.md @@ -4,6 +4,8 @@ Manipulate individual stages ```bash $ reth stage --help +``` +```txt Usage: reth stage [OPTIONS] Commands: diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index b700519e1a..ae21a89183 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -4,6 +4,8 @@ Drop a stage's tables from the database ```bash $ reth stage drop --help +``` +```txt Usage: reth stage drop [OPTIONS] Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index a5fd3052c0..291d896902 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -4,6 +4,8 @@ Dumps a stage from a range into a new database ```bash $ reth stage dump --help +``` +```txt Usage: reth stage dump [OPTIONS] Commands: @@ -40,7 +42,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config diff --git a/book/cli/reth/stage/dump/account-hashing.md b/book/cli/reth/stage/dump/account-hashing.md index 4c5030a55b..46145ed8d4 100644 --- a/book/cli/reth/stage/dump/account-hashing.md +++ b/book/cli/reth/stage/dump/account-hashing.md @@ -4,6 +4,8 @@ ```bash $ reth stage dump account-hashing --help +``` +```txt Usage: reth stage dump account-hashing [OPTIONS] --output-datadir --from --to Options: diff --git a/book/cli/reth/stage/dump/execution.md b/book/cli/reth/stage/dump/execution.md index 420602f8a6..836265a6f8 100644 --- a/book/cli/reth/stage/dump/execution.md +++ b/book/cli/reth/stage/dump/execution.md @@ -4,6 +4,8 @@ Execution stage ```bash $ reth stage dump execution --help +``` +```txt Usage: reth stage dump execution [OPTIONS] --output-datadir --from --to Options: diff --git a/book/cli/reth/stage/dump/merkle.md b/book/cli/reth/stage/dump/merkle.md index aa32e5e32c..90fa0f0f52 100644 --- a/book/cli/reth/stage/dump/merkle.md +++ b/book/cli/reth/stage/dump/merkle.md @@ -4,6 +4,8 @@ Merkle stage ```bash $ reth stage dump merkle --help +``` +```txt Usage: reth stage dump merkle [OPTIONS] --output-datadir --from --to Options: diff --git a/book/cli/reth/stage/dump/storage-hashing.md b/book/cli/reth/stage/dump/storage-hashing.md index 5ef1483de2..2b078570f5 100644 --- a/book/cli/reth/stage/dump/storage-hashing.md +++ b/book/cli/reth/stage/dump/storage-hashing.md @@ -4,6 +4,8 @@ ```bash $ reth stage dump storage-hashing --help +``` +```txt Usage: reth stage dump storage-hashing [OPTIONS] --output-datadir --from --to Options: diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 1f42dba7b2..3a5589e36e 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -4,6 +4,8 @@ Run a single stage. ```bash $ reth stage run --help +``` +```txt Usage: reth stage run [OPTIONS] --from --to Options: @@ -33,7 +35,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -264,6 +266,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. 
+ + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index a88883e11b..122b8dc48c 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -4,6 +4,8 @@ Unwinds a certain block range, deleting it from the database ```bash $ reth stage unwind --help +``` +```txt Usage: reth stage unwind [OPTIONS] Commands: @@ -38,7 +40,7 @@ Datadir: [default: default] - --datadir.static_files + --datadir.static-files The absolute path to store static files in. --config @@ -226,6 +228,16 @@ Networking: [default: 131072] + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index 2974b0841d..c74f75556f 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -4,6 +4,8 @@ Unwinds the database from the latest block, until the given number of blocks hav ```bash $ reth stage unwind num-blocks --help +``` +```txt Usage: reth stage unwind num-blocks [OPTIONS] Arguments: diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index 98f8f34620..ea710b953a 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -4,6 +4,8 @@ Unwinds the database from the latest block, until the given block number or hash ```bash $ reth stage unwind to-block --help +``` +```txt Usage: reth stage unwind to-block [OPTIONS] Arguments: diff --git a/book/cli/update.sh b/book/cli/update.sh index bffacf2877..6e792df0f2 100755 --- a/book/cli/update.sh +++ b/book/cli/update.sh @@ -5,7 +5,7 @@ BOOK_ROOT="$(dirname "$(dirname "$0")")" RETH=${1:-"$(dirname "$BOOK_ROOT")/target/debug/reth"} cmd=( - "$(dirname "$0")/help.py" + "$(dirname "$0")/help.rs" --root-dir "$BOOK_ROOT/" --root-indentation 2 --root-summary diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md index b15a203cdd..3c90e5a693 100644 --- a/book/developers/exex/hello-world.md +++ b/book/developers/exex/hello-world.md @@ -24,7 +24,9 @@ reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging + eyre = "0.6" # Easy error handling +futures-util = "0.3" # Stream utilities for consuming notifications ``` ### Default Reth node @@ -101,13 +103,14 @@ If you try running a node with an ExEx that exits, the node will exit as well. 
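A note on the `futures-util` dependency added above: `ctx.notifications` is consumed as a `Stream` in the updated examples, and the `next()` method comes from the `StreamExt` extension trait rather than from the notifications type itself. A minimal, self-contained sketch of that pattern, assuming only `tokio` and `futures-util` as dependencies, with a plain in-memory stream standing in for the ExEx context:

```rust
use futures_util::StreamExt; // provides `.next()` on any Stream

#[tokio::main]
async fn main() {
    // `futures_util::stream::iter` stands in for `ctx.notifications` here.
    let mut notifications = futures_util::stream::iter(["commit", "revert", "commit"]);

    // The same `while let ... next().await` loop the updated ExEx examples use.
    while let Some(notification) = notifications.next().await {
        println!("received: {notification}");
    }
}
```

The same `while let Some(..) = ... .next().await` shape appears in each snippet this diff migrates away from `recv()`.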
Now, let's extend our simplest ExEx and start actually listening to new notifications, log them, and send events back to the main node
 
 ```rust,norun,noplayground,ignore
+use futures_util::StreamExt;
 use reth::api::FullNodeComponents;
 use reth_exex::{ExExContext, ExExEvent, ExExNotification};
 use reth_node_ethereum::EthereumNode;
 use reth_tracing::tracing::info;
 
 async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> {
-    while let Some(notification) = ctx.notifications.recv().await {
+    while let Some(notification) = ctx.notifications.next().await {
         match &notification {
             ExExNotification::ChainCommitted { new } => {
                 info!(committed_chain = ?new.range(), "Received commit");
diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md
index 2e7e7dad10..2db5074e1d 100644
--- a/book/developers/exex/remote.md
+++ b/book/developers/exex/remote.md
@@ -268,13 +268,15 @@ Don't forget to emit `ExExEvent::FinishedHeight`
 
 ```rust,norun,noplayground,ignore
 // ...
+
+use futures_util::StreamExt;
 use reth_exex::{ExExContext, ExExEvent};
 
 async fn remote_exex<Node: FullNodeComponents>(
     mut ctx: ExExContext<Node>,
     notifications: Arc<broadcast::Sender<ExExNotification>>,
 ) -> eyre::Result<()> {
-    while let Some(notification) = ctx.notifications.recv().await {
+    while let Some(notification) = ctx.notifications.next().await {
         if let Some(committed_chain) = notification.committed_chain() {
             ctx.events
                 .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
@@ -332,6 +334,9 @@ fn main() -> eyre::Result<()> {
 Click to expand
 
 ```rust,norun,noplayground,ignore
+use std::sync::Arc;
+
+use futures_util::StreamExt;
 use remote_exex::proto::{
     self,
     remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
@@ -340,7 +345,6 @@ use reth::api::FullNodeComponents;
 use reth_exex::{ExExContext, ExExEvent, ExExNotification};
 use reth_node_ethereum::EthereumNode;
 use reth_tracing::tracing::info;
-use std::sync::Arc;
 use tokio::sync::{broadcast, mpsc};
 use tokio_stream::wrappers::ReceiverStream;
 use tonic::{transport::Server, Request, Response, Status};
@@ -381,7 +385,7 @@ async fn remote_exex<Node: FullNodeComponents>(
     mut ctx: ExExContext<Node>,
     notifications: Arc<broadcast::Sender<ExExNotification>>,
 ) -> eyre::Result<()> {
-    while let Some(notification) = ctx.notifications.recv().await {
+    while let Some(notification) = ctx.notifications.next().await {
         if let Some(committed_chain) = notification.committed_chain() {
             ctx.events
                 .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md
index 5fe8b1c9ef..4d3bbd0a35 100644
--- a/book/developers/exex/tracking-state.md
+++ b/book/developers/exex/tracking-state.md
@@ -25,6 +25,7 @@ use std::{
     task::{ready, Context, Poll},
 };
 
+use futures_util::StreamExt;
 use reth::api::FullNodeComponents;
 use reth_exex::{ExExContext, ExExEvent, ExExNotification};
 use reth_node_ethereum::EthereumNode;
@@ -40,7 +41,7 @@ impl<Node: FullNodeComponents> Future for MyExEx<Node> {
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         let this = self.get_mut();
 
-        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+        while let Some(notification) = ready!(this.ctx.notifications.poll_next_unpin(cx)) {
             match &notification {
                 ExExNotification::ChainCommitted { new } => {
                     info!(committed_chain = ?new.range(), "Received commit");
@@ -101,6 +102,7 @@ use std::{
     task::{ready, Context, Poll},
 };
 
+use futures_util::StreamExt;
 use reth::{api::FullNodeComponents, primitives::BlockNumber};
 use reth_exex::{ExExContext, ExExEvent};
 use reth_node_ethereum::EthereumNode;
@@ -130,7 +132,7 @@ impl<Node: FullNodeComponents> Future for MyExEx<Node> {
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         let this = self.get_mut();
 
-        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+        while let Some(notification) = ready!(this.ctx.notifications.poll_next_unpin(cx)) {
             if let Some(reverted_chain) = notification.reverted_chain() {
                 this.transactions = this.transactions.saturating_sub(
                     reverted_chain
diff --git a/book/run/config.md b/book/run/config.md
index f2da90ea4f..10fd40ca76 100644
--- a/book/run/config.md
+++ b/book/run/config.md
@@ -352,7 +352,7 @@ i.e. keep the data for the last `100_000` blocks
 # Minimum pruning interval measured in blocks
 block_interval = 5
 
-[prune.parts]
+[prune.segments]
 # Sender Recovery pruning configuration
 sender_recovery = { distance = 100_000 } # Prune all transaction senders before the block `head-100000`, i.e. keep transaction senders for the last 100001 blocks
@@ -373,7 +373,7 @@ We can also prune receipts more granular, using the logs filtering:
 ```toml
 # Receipts pruning configuration by retaining only those receipts that contain logs emitted
 # by the specified addresses, discarding all others. This setting is overridden by `receipts`.
-[prune.parts.receipts_log_filter]
+[prune.segments.receipts_log_filter]
 # Prune all receipts, leaving only those which:
 # - Contain logs from address `0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48`, starting from the block 17000000
 # - Contain logs from address `0xdac17f958d2ee523a2206206994597c13d831ec7` in the last 1001 blocks
diff --git a/book/run/optimism.md b/book/run/optimism.md
index 4c2f09ef19..ed71d1e0d9 100644
--- a/book/run/optimism.md
+++ b/book/run/optimism.md
@@ -42,20 +42,6 @@ Next, you'll need to install a [Rollup Node][rollup-node-spec], which is the equ
 For the sake of this tutorial, we'll use the reference implementation of the Rollup Node maintained by OP Labs, the `op-node`. The `op-node` can be built from source, or pulled from a [Docker image available on Google Cloud][op-node-docker].
 
-**`rethdb` build tag**
-The `op-node` also comes with an experimental `rethdb` build tag, which allows it to read receipts directly from an L1 `reth` database during [derivation][derivation-spec]. This can speed up sync times, but it is not required if you do not
-have access to the L1 archive node on the same machine as your L2 node.
-
-To build the `op-node` with the `rethdb` build tag enabled:
-```sh
-git clone https://github.com/ethereum-optimism/optimism.git && \
-  (cd optimism/op-service/rethdb-reader && cargo build --release) && \
-  cd optimism/op-node && \
-  go build -v -tags rethdb -o ./bin/op-node ./cmd/main.go && \
-  mv bin/op-node /usr/bin/op-node
-```
-This will build the `rethdb-reader` dylib and instruct the `op-node` build to statically link this dylib into the binary. The `op-node` binary will be installed to `/usr/bin/op-node`.
-
 ### Running `op-reth`
 
 The `optimism` feature flag in `op-reth` adds several new CLI flags to the `reth` binary:
@@ -91,17 +77,6 @@ op-node \
 
 Consider adding the `--l1.trustrpc` flag to improve performance, if the connection to l1 is over localhost.
 
-If you opted to build the `op-node` with the `rethdb` build tag, this feature can be enabled by appending one extra flag to the `op-node` invocation:
-
-> Note, the `reth_db_path` is the path to the `db` folder inside of the reth datadir, not the `mdbx.dat` file itself. This can be fetched from `op-reth db path [--chain ]`, or if you are using a custom datadir location via the `--datadir` flag,
-> by appending `/db` to the end of the path.
-
-```sh
-op-node \
-  # ...
- --l1.rethdb= -``` - [l1-el-spec]: https://github.com/ethereum/execution-specs [rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md [op-geth-forkdiff]: https://op-geth.optimism.io diff --git a/book/run/pruning.md b/book/run/pruning.md index b6f23f5445..da3bb07e2c 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -80,14 +80,14 @@ Essentially, the full node is the same as following configuration for the pruned [prune] block_interval = 5 -[prune.parts] +[prune.segments] sender_recovery = "full" # transaction_lookup is not pruned receipts = { before = 11052984 } # Beacon Deposit Contract deployment block: https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 account_history = { distance = 10_064 } storage_history = { distance = 10_064 } -[prune.parts.receipts_log_filter] +[prune.segments.receipts_log_filter] # Prune all receipts, leaving only those which contain logs from address `0x00000000219ab540356cbb839cbe05303d7705fa`, # starting from the block 11052984. This leaves receipts with the logs from the Beacon Deposit Contract. "0x00000000219ab540356cbb839cbe05303d7705fa" = { before = 11052984 } diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index 63f45fe331..d8d7becc18 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -1,5 +1,25 @@ # Sync OP Mainnet +To sync OP mainnet, bedrock state needs to be imported as a starting point. There are currently two ways: + +* Minimal bootstrap: only state snapshot at Bedrock block is imported without any OVM historical data. +* Full bootstrap: state, blocks and receipts are imported. + +## Minimal bootstrap + +**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). + +```sh +$ op-reth init-state --without-ovm --chain optimism --datadir op-mainnet world_trie_state.jsonl + +$ op-reth node --chain optimism --datadir op-mainnet --debug.tip 0x098f87b75c8b861c775984f9d5dbe7b70cbbbc30fc15adb03a5044de0144f2d0 # block #125200000 +``` + + +## Full bootstrap + +### Import state + To sync OP mainnet, the Bedrock datadir needs to be imported to use as starting point. Blocks lower than the OP mainnet Bedrock fork, are built on the OVM and cannot be executed on the EVM. For this reason, the chain segment from genesis until Bedrock, must be manually imported to circumvent @@ -10,7 +30,7 @@ Importing OP mainnet Bedrock datadir requires exported data: - Blocks [and receipts] below Bedrock - State snapshot at first Bedrock block -## Manual Export Steps +### Manual Export Steps The `op-geth` Bedrock datadir can be downloaded from . @@ -18,9 +38,9 @@ To export the OVM chain from `op-geth`, clone the `testinprod-io/op-geth` repo a . Commands to export blocks, receipts and state dump can be found in `op-geth/migrate.sh`. -## Manual Import Steps +### Manual Import Steps -### 1. Import Blocks +#### 1. Import Blocks Imports a `.rlp` file of blocks. @@ -30,7 +50,7 @@ Import of >100 million OVM blocks, from genesis to Bedrock, completes in 45 minu $ op-reth import-op ``` -### 2. Import Receipts +#### 2. Import Receipts This step is optional. To run a full node, skip this step. 
If however receipts are to be imported, the corresponding transactions must already be imported (see [step 1](#1-import-blocks)).
@@ -44,7 +64,7 @@ Import of >100 million OVM receipts, from genesis to Bedrock, completes in 30 mi
 $ op-reth import-receipts-op
 ```
 
-### 3. Import State
+#### 3. Import State
 
 Imports a `.jsonl` state dump. The block at which the state dump is made, must be the latest block in
 reth's database. This should be block 105 235 063, the first Bedrock block (see [step 1](#1-import-blocks)).
@@ -61,4 +81,4 @@ Running the node with `--debug.tip <block hash>` syncs the node without help from
 block hash can be taken from the latest block on <https://optimistic.etherscan.io>.
 
 Use `op-node` to track the tip. Start `op-node` with `--syncmode=execution-layer` and `--l2.enginekind=reth`. If `op-node`'s RPC
-connection to L1 is over localhost, `--l1.trustrpc` can be set to improve performance.
\ No newline at end of file
+connection to L1 is over localhost, `--l1.trustrpc` can be set to improve performance.
diff --git a/bsc.Dockerfile b/bsc.Dockerfile
index 8dd8951a91..04d559f94b 100644
--- a/bsc.Dockerfile
+++ b/bsc.Dockerfile
@@ -1,4 +1,4 @@
-FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef
+FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef
 WORKDIR /app
 LABEL org.opencontainers.image.source=https://github.com/bnb-chain/reth
diff --git a/clippy.toml b/clippy.toml
index a6afa98d99..b498158094 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1,2 +1,3 @@
-msrv = "1.80"
+msrv = "1.81"
 too-large-for-stack = 128
+doc-valid-idents = ["P2P", "ExEx", "ExExes", "IPv4", "IPv6", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"]
diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml
index 69616209dd..552b727671 100644
--- a/crates/blockchain-tree-api/Cargo.toml
+++ b/crates/blockchain-tree-api/Cargo.toml
@@ -16,5 +16,8 @@ reth-execution-errors.workspace = true
 reth-primitives.workspace = true
 reth-storage-errors.workspace = true
 
+# alloy
+alloy-primitives.workspace = true
+
 # misc
 thiserror.workspace = true
diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs
index 5ef146b087..5dbd4cf179 100644
--- a/crates/blockchain-tree-api/src/error.rs
+++ b/crates/blockchain-tree-api/src/error.rs
@@ -1,10 +1,11 @@
 //! Error handling for the blockchain tree
 
+use alloy_primitives::{BlockHash, BlockNumber};
 use reth_consensus::ConsensusError;
 use reth_execution_errors::{
     BlockExecutionError, BlockValidationError, InternalBlockExecutionError,
 };
-use reth_primitives::{BlockHash, BlockNumber, SealedBlock};
+use reth_primitives::SealedBlock;
 pub use reth_storage_errors::provider::ProviderError;
 
 /// Various error cases that can occur when a block violates tree assumptions.
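The `error.rs` hunk above shows the migration pattern the following hunks repeat: primitive aliases such as `BlockHash` and `BlockNumber` now come from `alloy_primitives` directly, while reth-specific types such as `SealedBlock` stay behind `reth_primitives`. A rough before/after sketch; `log_block` is a hypothetical helper for illustration, not code from this diff:

```rust
// Before: both aliases were pulled through the reth_primitives re-exports.
// use reth_primitives::{BlockHash, BlockNumber, SealedBlock};

// After: the aliases come from their canonical home in alloy_primitives.
use alloy_primitives::{BlockHash, BlockNumber};

/// Hypothetical helper: call sites compile unchanged, since the types are
/// identical under both import paths.
fn log_block(number: BlockNumber, hash: BlockHash) {
    println!("block #{number}: {hash}");
}

fn main() {
    log_block(1, BlockHash::ZERO);
}
```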
diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index b8fb06e055..0a1bf6164e 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -9,10 +9,8 @@ use self::error::CanonicalError; use crate::error::InsertBlockError; -use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, -}; +use alloy_primitives::{BlockHash, BlockNumber}; +use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 8a69ed3ad9..e9dd537e73 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -22,7 +22,6 @@ reth-evm.workspace = true reth-revm.workspace = true reth-provider.workspace = true reth-execution-types.workspace = true -reth-prune-types.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } @@ -30,6 +29,10 @@ reth-trie-parallel = { workspace = true, features = ["parallel"] } reth-network.workspace = true reth-consensus.workspace = true reth-trie-prefetch.workspace = true +reth-node-types.workspace = true + +# ethereum +alloy-primitives.workspace = true # common parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index dd1b00eb90..99729af0fa 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -1,6 +1,7 @@ use crate::metrics::BlockBufferMetrics; +use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; -use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}; /// Contains the tree of pending blocks that cannot be executed due to missing parent. @@ -182,7 +183,8 @@ impl BlockBuffer { #[cfg(test)] mod tests { use crate::BlockBuffer; - use reth_primitives::{BlockHash, BlockNumHash, SealedBlockWithSenders}; + use alloy_primitives::BlockHash; + use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; use std::collections::HashMap; diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 942963c122..fb132bdedc 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -2,9 +2,10 @@ use super::state::SidechainId; use crate::canonical_chain::CanonicalChain; +use alloy_primitives::{BlockHash, BlockNumber}; use linked_hash_set::LinkedHashSet; use reth_execution_types::Chain; -use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders}; +use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; /// Internal indices of the blocks and chains. 
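The hunks below add a sizable test module for `BlockIndices`. As a rough mental model of what those tests assert, with plain std types standing in for the real structures (the actual code uses `B256` hashes and a `LinkedHashSet` of children): the canonical chain is an ordered map from block number to hash, `fork_to_child` maps a parent hash to the child hashes extending it, and the pending block is the first child forking off the canonical tip.

```rust
use std::collections::{BTreeMap, HashMap};

type BlockNumber = u64;
type BlockHash = [u8; 32]; // stand-in for B256

/// Toy version of the pending-block lookup the new tests exercise:
/// take the canonical tip, then the first child registered as forking
/// off it; `None` if the tip has no children.
fn pending_block(
    canonical: &BTreeMap<BlockNumber, BlockHash>,
    fork_to_child: &HashMap<BlockHash, Vec<BlockHash>>,
) -> Option<(BlockNumber, BlockHash)> {
    let (tip_number, tip_hash) = canonical.iter().next_back()?;
    let child = fork_to_child.get(tip_hash)?.first()?;
    Some((tip_number + 1, *child))
}

fn main() {
    let canonical = BTreeMap::from([(3u64, [3u8; 32])]);
    let fork_to_child = HashMap::from([([3u8; 32], vec![[4u8; 32]])]);
    // Mirrors `pending_block_num_hash_works`: the pending block is the
    // first child of the block at height 3, so it sits at height 4.
    assert_eq!(pending_block(&canonical, &fork_to_child), Some((4, [4u8; 32])));
}
```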
@@ -178,7 +179,7 @@ impl BlockIndices { if new_block_value.1 != old_block_value.1 { // remove block hash as it is different removed.push(old_block_value); - added.push(new_block_value.into()); + added.push(new_block_value.into()) } new_hash = new_hashes.next(); old_hash = old_hashes.next(); @@ -371,3 +372,247 @@ impl BlockIndices { &self.canonical_chain } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_primitives::{Header, SealedBlock, SealedHeader}; + + #[test] + fn pending_block_num_hash_returns_none_if_no_fork() { + // Create a new canonical chain with a single block (represented by its number and hash). + let canonical_chain = BTreeMap::from([(0, B256::from_slice(&[1; 32]))]); + + let block_indices = BlockIndices::new(0, canonical_chain); + + // No fork to child blocks, so there is no pending block. + assert_eq!(block_indices.pending_block_num_hash(), None); + } + + #[test] + fn pending_block_num_hash_works() { + // Create a canonical chain with multiple blocks at heights 1, 2, and 3. + let canonical_chain = BTreeMap::from([ + (1, B256::from_slice(&[1; 32])), + (2, B256::from_slice(&[2; 32])), + (3, B256::from_slice(&[3; 32])), + ]); + + let mut block_indices = BlockIndices::new(3, canonical_chain); + + // Define the hash of the parent block (the block at height 3 in the canonical chain). + let parent_hash = B256::from_slice(&[3; 32]); + + // Define the hashes of two child blocks that extend the canonical chain. + let child_hash_1 = B256::from_slice(&[2; 32]); + let child_hash_2 = B256::from_slice(&[3; 32]); + + // Create a set to store both child block hashes. + let mut child_set = LinkedHashSet::new(); + child_set.insert(child_hash_1); + child_set.insert(child_hash_2); + + // Associate the parent block hash with its children in the fork_to_child mapping. + block_indices.fork_to_child.insert(parent_hash, child_set); + + // Pending block should be the first child block. + assert_eq!( + block_indices.pending_block_num_hash(), + Some(BlockNumHash { number: 4, hash: child_hash_1 }) + ); + } + + #[test] + fn pending_blocks_returns_empty_if_no_fork() { + // Create a canonical chain with a single block at height 10. + let canonical_chain = BTreeMap::from([(10, B256::from_slice(&[1; 32]))]); + let block_indices = BlockIndices::new(0, canonical_chain); + + // No child blocks are associated with the canonical tip. + assert_eq!(block_indices.pending_blocks(), (11, Vec::new())); + } + + #[test] + fn pending_blocks_returns_multiple_children() { + // Define the hash of the parent block (the block at height 5 in the canonical chain). + let parent_hash = B256::from_slice(&[3; 32]); + + // Create a canonical chain with a block at height 5. + let canonical_chain = BTreeMap::from([(5, parent_hash)]); + let mut block_indices = BlockIndices::new(0, canonical_chain); + + // Define the hashes of two child blocks. + let child_hash_1 = B256::from_slice(&[4; 32]); + let child_hash_2 = B256::from_slice(&[5; 32]); + + // Create a set to store both child block hashes. + let mut child_set = LinkedHashSet::new(); + child_set.insert(child_hash_1); + child_set.insert(child_hash_2); + + // Associate the parent block hash with its children. + block_indices.fork_to_child.insert(parent_hash, child_set); + + // Pending blocks should be the two child blocks. + assert_eq!(block_indices.pending_blocks(), (6, vec![child_hash_1, child_hash_2])); + } + + #[test] + fn pending_blocks_with_multiple_forked_chains() { + // Define hashes for parent blocks and child blocks. 
+ let parent_hash_1 = B256::from_slice(&[6; 32]); + let parent_hash_2 = B256::from_slice(&[7; 32]); + + // Create a canonical chain with blocks at heights 1 and 2. + let canonical_chain = BTreeMap::from([(1, parent_hash_1), (2, parent_hash_2)]); + + let mut block_indices = BlockIndices::new(2, canonical_chain); + + // Define hashes for child blocks. + let child_hash_1 = B256::from_slice(&[8; 32]); + let child_hash_2 = B256::from_slice(&[9; 32]); + + // Create sets to store child blocks for each parent block. + let mut child_set_1 = LinkedHashSet::new(); + let mut child_set_2 = LinkedHashSet::new(); + child_set_1.insert(child_hash_1); + child_set_2.insert(child_hash_2); + + // Associate parent block hashes with their child blocks. + block_indices.fork_to_child.insert(parent_hash_1, child_set_1); + block_indices.fork_to_child.insert(parent_hash_2, child_set_2); + + // Check that the pending blocks are only those extending the canonical tip. + assert_eq!(block_indices.pending_blocks(), (3, vec![child_hash_2])); + } + + #[test] + fn insert_non_fork_block_adds_block_correctly() { + // Create a new BlockIndices instance with an empty state. + let mut block_indices = BlockIndices::new(0, BTreeMap::new()); + + // Define test parameters. + let block_number = 1; + let block_hash = B256::from_slice(&[1; 32]); + let chain_id = SidechainId::from(42); + + // Insert the block into the BlockIndices instance. + block_indices.insert_non_fork_block(block_number, block_hash, chain_id); + + // Check that the block number to block hashes mapping includes the new block hash. + assert_eq!( + block_indices.block_number_to_block_hashes.get(&block_number), + Some(&HashSet::from([block_hash])) + ); + + // Check that the block hash to chain ID mapping includes the new entry. + assert_eq!(block_indices.blocks_to_chain.get(&block_hash), Some(&chain_id)); + } + + #[test] + fn insert_non_fork_block_combined_tests() { + // Create a new BlockIndices instance with an empty state. + let mut block_indices = BlockIndices::new(0, BTreeMap::new()); + + // Define test parameters. + let block_number_1 = 2; + let block_hash_1 = B256::from_slice(&[1; 32]); + let block_hash_2 = B256::from_slice(&[2; 32]); + let chain_id_1 = SidechainId::from(84); + + let block_number_2 = 4; + let block_hash_3 = B256::from_slice(&[3; 32]); + let chain_id_2 = SidechainId::from(200); + + // Insert multiple hashes for the same block number. + block_indices.insert_non_fork_block(block_number_1, block_hash_1, chain_id_1); + block_indices.insert_non_fork_block(block_number_1, block_hash_2, chain_id_1); + + // Insert blocks with different numbers. + block_indices.insert_non_fork_block(block_number_2, block_hash_3, chain_id_2); + + // Block number 1 should have two block hashes associated with it. + let mut expected_hashes_for_block_1 = HashSet::new(); + expected_hashes_for_block_1.insert(block_hash_1); + expected_hashes_for_block_1.insert(block_hash_2); + assert_eq!( + block_indices.block_number_to_block_hashes.get(&block_number_1), + Some(&expected_hashes_for_block_1) + ); + + // Check that the block hashes for block_number_1 are associated with the correct chain ID. + assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id_1)); + assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id_1)); + + // Block number 2 should have a single block hash associated with it. 
+ assert_eq!( + block_indices.block_number_to_block_hashes.get(&block_number_2), + Some(&HashSet::from([block_hash_3])) + ); + + // Block hash 3 should be associated with the correct chain ID. + assert_eq!(block_indices.blocks_to_chain.get(&block_hash_3), Some(&chain_id_2)); + } + + #[test] + fn insert_chain_validates_insertion() { + // Create a new BlockIndices instance with an empty state. + let mut block_indices = BlockIndices::new(0, BTreeMap::new()); + + // Define test parameters. + let chain_id = SidechainId::from(42); + + // Define some example blocks and their hashes. + let block_hash_1 = B256::from_slice(&[1; 32]); + let block_hash_2 = B256::from_slice(&[2; 32]); + let parent_hash = B256::from_slice(&[0; 32]); + + // Define blocks with their numbers and parent hashes. + let block_1 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { parent_hash, number: 1, ..Default::default() }, + block_hash_1, + ), + ..Default::default() + }, + ..Default::default() + }; + let block_2 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { parent_hash: block_hash_1, number: 2, ..Default::default() }, + block_hash_2, + ), + ..Default::default() + }, + ..Default::default() + }; + + // Define a chain containing the blocks. + let chain = Chain::new(vec![block_1, block_2], Default::default(), Default::default()); + + // Insert the chain into the BlockIndices. + block_indices.insert_chain(chain_id, &chain); + + // Check that the blocks are correctly mapped to the chain ID. + assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id)); + assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id)); + + // Check that block numbers map to their respective hashes. + let mut expected_hashes_1 = HashSet::new(); + expected_hashes_1.insert(block_hash_1); + assert_eq!(block_indices.block_number_to_block_hashes.get(&1), Some(&expected_hashes_1)); + + let mut expected_hashes_2 = HashSet::new(); + expected_hashes_2.insert(block_hash_2); + assert_eq!(block_indices.block_number_to_block_hashes.get(&2), Some(&expected_hashes_2)); + + // Check that the fork_to_child mapping contains the correct parent-child relationship. + // We take the first block of the chain. 
+ let mut expected_children = LinkedHashSet::new(); + expected_children.insert(block_hash_1); + assert_eq!(block_indices.fork_to_child.get(&parent_hash), Some(&expected_children)); + } +} diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 493b258b78..582ac03222 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,25 +5,26 @@ use crate::{ state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, }; +use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }; use reth_consensus::{Consensus, ConsensusError}; -use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, EthereumHardfork, ForkBlock, GotExpected, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, + BlockNumHash, EthereumHardfork, ForkBlock, GotExpected, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; use reth_provider::{ - BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DisplayBlocksChain, HeaderProvider, ProviderError, StaticFileProviderFactory, + providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, + CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, + ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HeaderProvider, + ProviderError, StaticFileProviderFactory, }; -use reth_prune_types::PruneModes; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ @@ -61,13 +62,13 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] -pub struct BlockchainTree { +pub struct BlockchainTree { /// The state of the tree /// /// Tracks all the chains, the block indices, and the block buffer. state: TreeState, /// External components (the database, consensus engine etc.) - externals: TreeExternals, + externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, /// Broadcast channel for canon state changes notifications. @@ -82,7 +83,7 @@ pub struct BlockchainTree { skip_state_root_validation: bool, } -impl BlockchainTree { +impl BlockchainTree { /// Subscribe to new blocks events. /// /// Note: Only canonical blocks are emitted by the tree. @@ -96,9 +97,9 @@ impl BlockchainTree { } } -impl BlockchainTree +impl BlockchainTree where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. @@ -122,9 +123,8 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. 
pub fn new( - mut externals: TreeExternals, + externals: TreeExternals, config: BlockchainTreeConfig, - prune_modes: PruneModes, ) -> ProviderResult { let max_reorg_depth = config.max_reorg_depth() as usize; // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg @@ -132,9 +132,6 @@ where let (canon_state_notification_sender, _receiver) = tokio::sync::broadcast::channel(max_reorg_depth * 2); - // Set the prune modes argument, on the provider - externals.provider_factory = externals.provider_factory.with_prune_modes(prune_modes); - let last_canonical_hashes = externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; @@ -209,7 +206,7 @@ where ) -> Result, InsertBlockErrorKind> { // check if block is canonical if self.is_block_hash_canonical(&block.hash)? { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) + return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); } let last_finalized_block = self.block_indices().last_finalized_block(); @@ -217,7 +214,7 @@ where if block.number <= last_finalized_block { // check if block is inside database if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))) + return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); } return Err(BlockchainTreeError::PendingBlockIsFinalized { @@ -228,7 +225,7 @@ where // is block inside chain if let Some(attachment) = self.is_block_inside_sidechain(&block) { - return Ok(Some(BlockStatus::Valid(attachment))) + return Ok(Some(BlockStatus::Valid(attachment))); } // check if block is disconnected @@ -312,7 +309,7 @@ where let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() else { debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); - return None + return None; }; let canonical_chain = canonical_chain .iter() @@ -322,7 +319,7 @@ where // get canonical fork. let canonical_fork = self.canonical_fork(chain_id)?; - return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }) + return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }); } // check if there is canonical block @@ -332,7 +329,7 @@ where canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, execution_outcome: ExecutionOutcome::default(), parent_block_hashes: canonical_chain.inner().clone(), - }) + }); } None @@ -355,12 +352,12 @@ where // check if block parent can be found in any side chain. if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) { // found parent in side tree, try to insert there - return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind) + return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind); } // if not found, check if the parent can be found inside canonical chain. if self.is_block_hash_canonical(&parent.hash)? 
{ - return self.try_append_canonical_chain(block.clone(), block_validation_kind) + return self.try_append_canonical_chain(block.clone(), block_validation_kind); } // this is another check to ensure that if the block points to a canonical block its block @@ -723,7 +720,7 @@ where pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { // validate block consensus rules if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)) + return Err(InsertBlockError::consensus_error(err, block.block)); } self.state.buffered_blocks.insert_block(block); @@ -741,17 +738,17 @@ where "Failed to validate total difficulty for block {}: {e}", block.header.hash() ); - return Err(e) + return Err(e); } if let Err(e) = self.externals.consensus.validate_header(block) { error!(?block, "Failed to validate header {}: {e}", block.header.hash()); - return Err(e) + return Err(e); } if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { error!(?block, "Failed to validate block {}: {e}", block.header.hash()); - return Err(e) + return Err(e); } Ok(()) @@ -776,7 +773,7 @@ where Some(BlockAttachment::Canonical) } else { Some(BlockAttachment::HistoricalFork) - } + }; } None } @@ -817,7 +814,7 @@ where // validate block consensus rules if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)) + return Err(InsertBlockError::consensus_error(err, block.block)); } let status = self @@ -1092,7 +1089,7 @@ where } let head = self.state.block_indices.canonical_tip(); - return Ok(CanonicalOutcome::AlreadyCanonical { header, head }) + return Ok(CanonicalOutcome::AlreadyCanonical { header, head }); } let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else { @@ -1107,7 +1104,7 @@ where debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into(), - })); + })) }; trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); durations_recorder.record_relative(MakeCanonicalAction::SplitChain); @@ -1137,7 +1134,7 @@ where debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash: fork_block.hash, - })); + })) }; trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); let mut chain_appended = false; @@ -1321,7 +1318,7 @@ where pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> { // nothing to be done if unwind_to is higher then the tip if self.block_indices().canonical_tip().number <= unwind_to { - return Ok(()) + return Ok(()); } // revert `N` blocks from current canonical chain and put them inside BlockchainTree let old_canon_chain = self.revert_canonical_from_database(unwind_to)?; @@ -1363,7 +1360,7 @@ where "Reverting optimistic canonical chain to block {}", revert_until ); - return Err(CanonicalError::OptimisticTargetRevert(revert_until)) + return Err(CanonicalError::OptimisticTargetRevert(revert_until)); } // read data that is needed for new sidechain @@ -1417,28 +1414,27 @@ where mod tests { use super::*; use alloy_genesis::{Genesis, GenesisAccount}; + use alloy_primitives::{keccak256, Address, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use 
reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; use reth_consensus::test_utils::TestConsensus; - use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; + use reth_db::tables; use reth_db_api::transaction::DbTxMut; use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; - #[cfg(not(feature = "optimism"))] - use reth_primitives::proofs::calculate_receipt_root; - #[cfg(feature = "optimism")] - use reth_primitives::proofs::calculate_receipt_root_optimism; use reth_primitives::{ constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, - keccak256, - proofs::calculate_transaction_root, + proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, - Account, Address, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, + Account, Header, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, + TxEip1559, Withdrawals, }; use reth_provider::{ - test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec}, + test_utils::{ + blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, + MockNodeTypesWithDB, + }, ProviderFactory, }; use reth_stages_api::StageCheckpoint; @@ -1447,7 +1443,7 @@ mod tests { fn setup_externals( exec_res: Vec, - ) -> TreeExternals>, MockExecutorProvider> { + ) -> TreeExternals { let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -1463,7 +1459,7 @@ mod tests { TreeExternals::new(provider_factory, consensus, executor_factory) } - fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { + fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { // insert genesis to db. 
genesis.header.set_block_number(10); @@ -1540,7 +1536,7 @@ mod tests { self } - fn assert(self, tree: &BlockchainTree) { + fn assert(self, tree: &BlockchainTree) { if let Some(chain_num) = self.chain_num { assert_eq!(tree.state.chains.len(), chain_num); } @@ -1601,13 +1597,13 @@ mod tests { provider_rw.commit().unwrap(); } - let single_tx_cost = U256::from(EIP1559_INITIAL_BASE_FEE * 21_000); + let single_tx_cost = U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { TransactionSigned::from_transaction_and_signature( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() @@ -1630,25 +1626,22 @@ mod tests { Receipt { tx_type: tx.tx_type(), success: true, - cumulative_gas_used: (idx as u64 + 1) * 21_000, + cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, ..Default::default() } .with_bloom() }) .collect::>(); - #[cfg(not(feature = "optimism"))] + // receipts root computation is different for OP let receipts_root = calculate_receipt_root(&receipts); - #[cfg(feature = "optimism")] - let receipts_root = calculate_receipt_root_optimism(&receipts, &chain_spec, 0); - SealedBlockWithSenders::new( SealedBlock { header: Header { number, parent_hash: parent.unwrap_or_default(), - gas_used: body.len() as u64 * 21_000, + gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: chain_spec.max_gas_limit, mix_hash: B256::random(), base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), @@ -1695,7 +1688,6 @@ mod tests { let mut tree = BlockchainTree::new( TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), - PruneModes::default(), ) .expect("failed to create tree"); @@ -1775,8 +1767,7 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1851,8 +1842,7 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1937,8 +1927,7 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -2036,8 +2025,7 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); let mut canon_notif = tree.subscribe_canon_state(); // genesis block 10 is already canonical @@ -2430,8 +2418,7 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = 
BlockchainTree::new(externals, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); assert_eq!( tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), @@ -2451,8 +2438,8 @@ mod tests { tree.make_canonical(block2.hash()).unwrap(); // restart - let mut tree = BlockchainTree::new(cloned_externals_1, config, PruneModes::default()) - .expect("failed to create tree"); + let mut tree = + BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), 0); let mut block1a = block1; @@ -2468,8 +2455,7 @@ mod tests { tree.finalize_block(block1a.number).unwrap(); // restart - let tree = BlockchainTree::new(cloned_externals_2, config, PruneModes::default()) - .expect("failed to create tree"); + let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); } diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index e10adecdca..226afd8fab 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -1,6 +1,7 @@ //! [`ExecutionDataProvider`] implementations used by the tree. -use reth_primitives::{BlockHash, BlockNumber, ForkBlock}; +use alloy_primitives::{BlockHash, BlockNumber}; +use reth_primitives::ForkBlock; use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome}; use std::collections::BTreeMap; @@ -25,7 +26,7 @@ impl<'a> ExecutionDataProvider for BundleStateDataRef<'a> { fn block_hash(&self, block_number: BlockNumber) -> Option { let block_hash = self.sidechain_block_hashes.get(&block_number).copied(); if block_hash.is_some() { - return block_hash + return block_hash; } self.canonical_block_hashes.get(&block_number).copied() diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index e01e52dcb0..e3dc596ba0 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -1,4 +1,5 @@ -use reth_primitives::{BlockHash, BlockNumHash, BlockNumber}; +use alloy_primitives::{BlockHash, BlockNumber}; +use reth_primitives::BlockNumHash; use std::collections::BTreeMap; /// This keeps track of (non-finalized) blocks of the canonical chain. 
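A note on the `block_hash` tweak in `bundle.rs` above: the lookup order is the point. Side-chain hashes shadow canonical ones, and only on a miss does the canonical index answer. A minimal sketch of that overlay lookup, with plain `BTreeMap`s standing in for the provider types (names here are illustrative, not the crate's API):

use std::collections::BTreeMap;

type BlockNumber = u64;
type BlockHash = [u8; 32];

/// Side-chain hashes take precedence; fall back to the canonical index on a miss.
fn block_hash(
    number: BlockNumber,
    sidechain: &BTreeMap<BlockNumber, BlockHash>,
    canonical: &BTreeMap<BlockNumber, BlockHash>,
) -> Option<BlockHash> {
    sidechain.get(&number).copied().or_else(|| canonical.get(&number).copied())
}

fn main() {
    let sidechain = BTreeMap::from([(2, [0xaa; 32])]);
    let canonical = BTreeMap::from([(1, [0x01; 32]), (2, [0x02; 32])]);
    // Height 2 resolves to the side-chain hash, height 1 to the canonical one.
    assert_eq!(block_hash(2, &sidechain, &canonical), Some([0xaa; 32]));
    assert_eq!(block_hash(1, &sidechain, &canonical), Some([0x01; 32]));
}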
diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 6c28cd7212..b158530f4a 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,28 +5,29 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use alloy_primitives::{BlockHash, BlockNumber, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; -use reth_db_api::database::Database; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - BlockHash, BlockNumber, ForkBlock, GotExpected, Header, SealedBlockWithSenders, SealedHeader, - B256, U256, + revm_primitives::EvmState, ForkBlock, GotExpected, Header, SealedBlockWithSenders, + SealedHeader, B256, }; use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView}, - FullExecutionDataProvider, ProviderError, StateRootProvider, + providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, + FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, }; -use reth_revm::{database::StateProviderDatabase, primitives::EvmState}; -use reth_trie::{updates::TrieUpdates, HashedPostState}; +use reth_revm::database::StateProviderDatabase; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::parallel_root::ParallelStateRoot; use reth_trie_prefetch::TriePrefetch; use std::{ + clone::Clone, collections::{BTreeMap, HashMap}, ops::{Deref, DerefMut}, sync::Arc, @@ -70,18 +71,18 @@ impl AppendableChain { /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root /// of the block. #[allow(clippy::too_many_arguments)] - pub fn new_canonical_fork( + pub fn new_canonical_fork( block: SealedBlockWithSenders, parent_header: &SealedHeader, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, enable_prefetch: bool, ) -> Result where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); @@ -105,25 +106,25 @@ impl AppendableChain { enable_prefetch, )?; - Ok(Self { chain: Chain::new(vec![block], bundle_state, trie_updates) }) + Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates))) } /// Create a new chain that forks off of an existing sidechain. /// /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. 
#[allow(clippy::too_many_arguments)] - pub(crate) fn new_chain_fork( + pub(crate) fn new_chain_fork( &self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_validation_kind: BlockValidationKind, enable_prefetch: bool, ) -> Result where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { let parent_number = @@ -133,11 +134,6 @@ impl AppendableChain { )?; let mut execution_outcome = self.execution_outcome().clone(); - - // Revert state to the state after execution of the parent block - execution_outcome.revert_to(parent.number); - - // Revert changesets to get the state of the parent that we need to apply the change. let bundle_state_data = BundleStateDataRef { execution_outcome: &execution_outcome, sidechain_block_hashes: &side_chain_block_hashes, @@ -166,7 +162,7 @@ impl AppendableChain { execution_outcome.set_first_block(block.number); // If all is okay, return new chain back. Present chain is not modified. - Ok(Self { chain: Chain::from_block(block, execution_outcome, None) }) + Ok(Self::new(Chain::from_block(block, execution_outcome, None))) } /// Validate and execute the given block that _extends the canonical chain_, validating its @@ -179,19 +175,19 @@ impl AppendableChain { /// cache the trie state updates. /// - [`BlockValidationKind`] determines if the state root __should__ be validated. #[allow(clippy::too_many_arguments)] - fn validate_and_execute( + fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, ancestor_blocks: Option<&HashMap>, bundle_state_data_provider: EDP, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, enable_prefetch: bool, ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> where EDP: FullExecutionDataProvider, - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { // some checks are done before blocks comes here. @@ -214,7 +210,7 @@ impl AppendableChain { // State root calculation can take a while, and we're sure no write transaction // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509. .disable_long_read_transaction_safety() - .state_provider_by_block_number(canonical_fork.number)?; + .try_into_history_at_block(canonical_fork.number)?; let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); @@ -249,11 +245,13 @@ impl AppendableChain { let mut execution_outcome = provider.block_execution_data_provider.execution_outcome().clone(); execution_outcome.extend(initial_execution_outcome.clone()); - let hashed_state = execution_outcome.hash_state_slow(); - ParallelStateRoot::new(consistent_view, hashed_state) - .incremental_root_with_updates() - .map(|(root, updates)| (root, Some(updates))) - .map_err(ProviderError::from)? + ParallelStateRoot::new( + consistent_view, + TrieInput::from_state(execution_outcome.hash_state_slow()), + ) + .incremental_root_with_updates() + .map(|(root, updates)| (root, Some(updates))) + .map_err(ProviderError::from)? } else { let hashed_state = HashedPostState::from_bundle_state(&initial_execution_outcome.state().state); @@ -301,19 +299,19 @@ impl AppendableChain { /// __not__ the canonical head. 
#[track_caller] #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( + pub(crate) fn append_block( &mut self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, + externals: &TreeExternals, canonical_fork: ForkBlock, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, enable_prefetch: bool, ) -> Result<(), InsertBlockErrorKind> where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { let parent_block = self.chain.tip(); @@ -344,14 +342,14 @@ impl AppendableChain { Ok(()) } - fn setup_prefetch( - externals: &TreeExternals, + fn setup_prefetch( + externals: &TreeExternals, ) -> ( Option>, Option>, ) where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { let (prefetch_tx, prefetch_rx) = tokio::sync::mpsc::unbounded_channel(); @@ -369,7 +367,7 @@ impl AppendableChain { tokio::spawn({ async move { - trie_prefetch.run::(Arc::new(consistent_view), prefetch_rx, interrupt_rx).await; + trie_prefetch.run(Arc::new(consistent_view), prefetch_rx, interrupt_rx).await; } }); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index a903ae7496..a4f72f6d33 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,12 +1,14 @@ //! Blockchain tree externals. +use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; use reth_db::{static_file::HeaderMask, tables}; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; -use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::StaticFileSegment; use reth_provider::{ - FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, - StatsReader, + providers::ProviderNodeTypes, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, + StaticFileProviderFactory, StatsReader, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -21,19 +23,19 @@ use std::{collections::BTreeMap, sync::Arc}; /// - The executor factory to execute blocks with /// - The chain spec #[derive(Debug)] -pub struct TreeExternals { +pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. - pub(crate) provider_factory: ProviderFactory, + pub(crate) provider_factory: ProviderFactory, /// The consensus engine. pub(crate) consensus: Arc, /// The executor factory to execute blocks with. pub(crate) executor_factory: E, } -impl TreeExternals { +impl TreeExternals { /// Create new tree externals. pub fn new( - provider_factory: ProviderFactory, + provider_factory: ProviderFactory, consensus: Arc, executor_factory: E, ) -> Self { @@ -41,7 +43,7 @@ impl TreeExternals { } } -impl TreeExternals { +impl TreeExternals { /// Fetches the latest canonical block hashes by walking backwards from the head. 
/// /// Returns the hashes sorted by increasing block numbers diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index d92131dc8a..76e59a4779 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -1,13 +1,11 @@ +use alloy_primitives::{BlockHash, BlockNumber}; use reth_blockchain_tree_api::{ self, error::{BlockchainTreeError, CanonicalError, InsertBlockError, ProviderError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, -}; +use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, FullExecutionDataProvider, diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index c106d0ad32..333527b83e 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,21 +1,19 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. use super::BlockchainTree; +use alloy_primitives::{BlockHash, BlockNumber}; use parking_lot::RwLock; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; -use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, -}; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - BlockchainTreePendingStateProvider, CanonStateSubscriptions, FullExecutionDataProvider, - ProviderError, + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, + FullExecutionDataProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -23,21 +21,21 @@ use tracing::trace; /// Shareable blockchain tree that is behind a `RwLock` #[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { +pub struct ShareableBlockchainTree { /// `BlockchainTree` - pub tree: Arc>>, + pub tree: Arc>>, } -impl ShareableBlockchainTree { +impl ShareableBlockchainTree { /// Create a new shareable database. 
- pub fn new(tree: BlockchainTree) -> Self { + pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } } } -impl BlockchainTreeEngine for ShareableBlockchainTree +impl BlockchainTreeEngine for ShareableBlockchainTree where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -106,9 +104,9 @@ where } } -impl BlockchainTreeViewer for ShareableBlockchainTree +impl BlockchainTreeViewer for ShareableBlockchainTree where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { @@ -169,9 +167,9 @@ where } } -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree +impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, E: BlockExecutorProvider, { fn find_pending_state_provider( @@ -184,9 +182,9 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl CanonStateSubscriptions for ShareableBlockchainTree where - DB: Send + Sync, + N: ProviderNodeTypes, E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index dcf91d8bbd..b76db9e6a9 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -1,7 +1,8 @@ //! Blockchain tree state. use crate::{AppendableChain, BlockBuffer, BlockIndices}; -use reth_primitives::{BlockHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders}; +use alloy_primitives::{BlockHash, BlockNumber}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use std::collections::{BTreeMap, HashMap}; /// Container to hold the state of the blockchain tree. 
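The `ShareableBlockchainTree` hunks above only swap the generic parameters; the sharing pattern itself is unchanged: one tree behind `Arc<RwLock<_>>`, with every trait method taking a short-lived read or write guard. A stripped-down sketch of the pattern using std's lock in place of `parking_lot` (illustrative only, not the crate's code):

use std::sync::{Arc, RwLock};

#[derive(Default)]
struct Tree {
    blocks: Vec<u64>,
}

#[derive(Clone, Default)]
struct ShareableTree {
    inner: Arc<RwLock<Tree>>,
}

impl ShareableTree {
    /// Writers hold the lock only for the duration of the mutation.
    fn insert_block(&self, number: u64) {
        self.inner.write().unwrap().blocks.push(number);
    }

    /// Readers can run concurrently under the read guard.
    fn contains(&self, number: u64) -> bool {
        self.inner.read().unwrap().blocks.contains(&number)
    }
}

fn main() {
    let tree = ShareableTree::default();
    let handle = tree.clone(); // cheap: clones the Arc, not the tree
    handle.insert_block(1);
    assert!(tree.contains(1));
}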
diff --git a/crates/bsc/chainspec/src/bsc.rs b/crates/bsc/chainspec/src/bsc.rs index 743809590f..435a139b17 100644 --- a/crates/bsc/chainspec/src/bsc.rs +++ b/crates/bsc/chainspec/src/bsc.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::{Chain, NamedChain}; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::BscHardfork; use crate::BscChainSpec; @@ -20,7 +20,7 @@ pub static BSC_MAINNET: Lazy> = Lazy::new(|| { chain: Chain::from_named(NamedChain::BNBSmartChain), genesis: serde_json::from_str(include_str!("../res/genesis/bsc.json")) .expect("Can't deserialize BSC Mainnet genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/bsc/chainspec/src/bsc_chapel.rs b/crates/bsc/chainspec/src/bsc_chapel.rs index abeaadc2c9..bd45a8915f 100644 --- a/crates/bsc/chainspec/src/bsc_chapel.rs +++ b/crates/bsc/chainspec/src/bsc_chapel.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::{Chain, NamedChain}; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::BscHardfork; use crate::BscChainSpec; @@ -20,7 +20,7 @@ pub static BSC_CHAPEL: Lazy> = Lazy::new(|| { chain: Chain::from_named(NamedChain::BNBSmartChainTestnet), genesis: serde_json::from_str(include_str!("../res/genesis/bsc_chapel.json")) .expect("Can't deserialize BSC Testnet genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "6d3c66c5357ec91d5c43af47e234a939b22557cbb552dc45bebbceeed90fbe34" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/bsc/chainspec/src/bsc_rialto.rs b/crates/bsc/chainspec/src/bsc_rialto.rs index 211397e58c..30f71fcb2f 100644 --- a/crates/bsc/chainspec/src/bsc_rialto.rs +++ b/crates/bsc/chainspec/src/bsc_rialto.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::BscHardfork; use crate::BscChainSpec; @@ -20,7 +20,7 @@ pub static BSC_RIALTO: Lazy> = Lazy::new(|| { chain: Chain::from_id(714), genesis: serde_json::from_str(include_str!("../res/genesis/bsc_rialto.json")) .expect("Can't deserialize BSC Rialto genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "281bcec9447d74982bb746f753eecd1320a9b7f4fdbd2cfaf1ae7170a93cff50" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/bsc/chainspec/src/dev.rs b/crates/bsc/chainspec/src/dev.rs index 95ac68fa47..1c2a09d73c 100644 --- a/crates/bsc/chainspec/src/dev.rs +++ b/crates/bsc/chainspec/src/dev.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::U256; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::DEV_HARDFORKS; use reth_primitives_traits::constants::DEV_GENESIS_HASH; @@ -25,7 +25,7 
@@ pub static BSC_DEV: Lazy> = Lazy::new(|| { chain: Chain::dev(), genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) .expect("Can't deserialize Dev testnet genesis json"), - genesis_hash: Some(DEV_GENESIS_HASH), + genesis_hash: once_cell_set(DEV_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: DEV_HARDFORKS.clone(), deposit_contract: None, diff --git a/crates/bsc/cli/src/chainspec.rs b/crates/bsc/cli/src/chainspec.rs index d51cfab28c..fe38c1b53f 100644 --- a/crates/bsc/cli/src/chainspec.rs +++ b/crates/bsc/cli/src/chainspec.rs @@ -23,7 +23,9 @@ fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { #[derive(Debug, Clone, Default)] pub struct BscChainSpecParser; -impl ChainSpecParser for BscChainSpecParser { +impl ChainSpecParser for BscChainSpecParser { + type ChainSpec = BscChainSpec; + const SUPPORTED_CHAINS: &'static [&'static str] = &[ "bsc", "bsc-mainnet", @@ -55,7 +57,7 @@ impl TypedValueParser for BscChainSpecParser { ) -> Result { let val = value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - >::parse(val).map_err(|err| { + ::parse(val).map_err(|err| { let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned()); let possible_values = Self::SUPPORTED_CHAINS.join(","); let msg = format!( @@ -80,7 +82,7 @@ mod tests { #[test] fn parse_known_chain_spec() { for &chain in BscChainSpecParser::SUPPORTED_CHAINS { - assert!(>::parse(chain).is_ok()); + assert!(::parse(chain).is_ok()); } } } diff --git a/crates/bsc/consensus/src/system_tx.rs b/crates/bsc/consensus/src/system_tx.rs index acfb7ceea8..1876dca3b1 100644 --- a/crates/bsc/consensus/src/system_tx.rs +++ b/crates/bsc/consensus/src/system_tx.rs @@ -32,7 +32,7 @@ impl Parlia { Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::ZERO, input: Bytes::from(input.clone()), @@ -60,7 +60,7 @@ impl Parlia { Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::ZERO, input: Bytes::from(input.clone()), @@ -77,7 +77,7 @@ impl Parlia { Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::ZERO, input: Bytes::from(input), @@ -85,11 +85,11 @@ impl Parlia { }) } - pub fn distribute_to_system(&self, system_reward: u128) -> Transaction { + pub fn distribute_to_system(&self, system_reward: u64) -> Transaction { Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::from(system_reward), input: Bytes::default(), @@ -97,14 +97,14 @@ impl Parlia { }) } - pub fn distribute_to_validator(&self, address: Address, block_reward: u128) -> Transaction { + pub fn distribute_to_validator(&self, address: Address, block_reward: u64) -> Transaction { let function = self.validator_abi.function("deposit").unwrap().first().unwrap(); let input = function.abi_encode_input(&[DynSolValue::from(address)]).unwrap(); Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::from(block_reward), input: Bytes::from(input), @@ -129,7 +129,7 @@ impl Parlia { 
Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::ZERO, input: Bytes::from(input), @@ -160,7 +160,7 @@ impl Parlia { Transaction::Legacy(TxLegacy { chain_id: Some(self.chain_spec.chain.id()), nonce: 0, - gas_limit: u64::MAX / 2, + gas_limit: u128::from(u64::MAX / 2), gas_price: 0, value: U256::ZERO, input: Bytes::from(input), diff --git a/crates/bsc/evm/src/execute.rs b/crates/bsc/evm/src/execute.rs index 0afcc6087f..8bd08bc7a4 100644 --- a/crates/bsc/evm/src/execute.rs +++ b/crates/bsc/evm/src/execute.rs @@ -1,6 +1,7 @@ //! Bsc block executor. use crate::{post_execution::PostExecutionInput, BscBlockExecutionError, BscEvmConfig}; +use core::fmt::Display; use lazy_static::lazy_static; use lru::LruCache; use parking_lot::RwLock; @@ -52,7 +53,7 @@ pub struct BscExecutorProvider { impl

<P> BscExecutorProvider<P> {
    /// Creates a new default bsc executor provider.
    pub fn bsc(chain_spec: Arc<ChainSpec>, provider: P) -> Self {
-        Self::new(chain_spec, Default::default(), Default::default(), provider)
+        Self::new(chain_spec.clone(), BscEvmConfig::new(chain_spec), Default::default(), provider)
    }
}
@@ -71,7 +72,7 @@ impl<P, EvmConfig> BscExecutorProvider<P, EvmConfig> {
 impl<P, EvmConfig> BscExecutorProvider<P, EvmConfig>
 where
     P: Clone,
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
 {
     fn bsc_executor(
         &self,
@@ -113,7 +114,7 @@ where
 impl<P, EvmConfig> BlockExecutorProvider for BscExecutorProvider<P, EvmConfig>
 where
     P: ParliaProvider + Clone + Unpin + 'static,
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
 {
     type Executor<DB: Database<Error: Into<ProviderError> + std::fmt::Display>> = BscBlockExecutor<EvmConfig, DB, P>;
@@ -164,7 +165,7 @@ pub(crate) struct BscEvmExecutor<EvmConfig> {
 impl<EvmConfig> BscEvmExecutor<EvmConfig>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
 {
     /// Executes the transactions in the block and returns the receipts.
     ///
@@ -341,8 +342,8 @@ impl<EvmConfig, DB, P> BscBlockExecutor<EvmConfig, DB, P> {
 impl<EvmConfig, DB, P> BscBlockExecutor<EvmConfig, DB, P>
 where
-    EvmConfig: ConfigureEvm,
-    DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
+    EvmConfig: ConfigureEvm<Header = Header>,
+    DB: Database<Error: Into<ProviderError> + Display>,
     P: ParliaProvider,
 {
     /// Configures a new evm configuration and block environment for the given block.
@@ -354,7 +355,6 @@ where
         self.executor.evm_config.fill_cfg_and_block_env(
             &mut cfg,
             &mut block_env,
-            self.chain_spec(),
             header,
             total_difficulty,
         );
@@ -726,7 +726,7 @@ where
 impl<EvmConfig, DB, P> Executor<DB> for BscBlockExecutor<EvmConfig, DB, P>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
     DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
     P: ParliaProvider,
 {
@@ -781,7 +781,7 @@ impl<EvmConfig, DB, P> BscBatchExecutor<EvmConfig, DB, P> {
 impl<EvmConfig, DB, P> BatchExecutor<DB> for BscBatchExecutor<EvmConfig, DB, P>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>
, DB: Database + std::fmt::Display>, P: ParliaProvider, { diff --git a/crates/bsc/evm/src/lib.rs b/crates/bsc/evm/src/lib.rs index 3b96db826c..2e86563e39 100644 --- a/crates/bsc/evm/src/lib.rs +++ b/crates/bsc/evm/src/lib.rs @@ -7,14 +7,19 @@ #![cfg(feature = "bsc")] use reth_chainspec::ChainSpec; -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_ethereum_forks::EthereumHardfork; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_primitives::{ - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + constants::EIP1559_INITIAL_BASE_FEE, + revm_primitives::{ + AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId, TxEnv, + }, transaction::FillTxEnv, Address, Bytes, Head, Header, TransactionSigned, U256, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::Env; +use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_shanghai}; @@ -27,11 +32,27 @@ mod post_execution; mod pre_execution; /// Bsc-related EVM configuration. -#[derive(Debug, Default, Clone, Copy)] +#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct BscEvmConfig; +pub struct BscEvmConfig { + chain_spec: Arc, +} + +impl BscEvmConfig { + /// Creates a new Ethereum EVM configuration with the given chain spec. + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } + } + + /// Returns the chain spec associated with this configuration. + pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } +} impl ConfigureEvmEnv for BscEvmConfig { + type Header = Header; + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); } @@ -49,12 +70,11 @@ impl ConfigureEvmEnv for BscEvmConfig { fn fill_cfg_env( &self, cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { let spec_id = revm_spec( - chain_spec, + self.chain_spec(), &Head { number: header.number, timestamp: header.timestamp, @@ -64,7 +84,7 @@ impl ConfigureEvmEnv for BscEvmConfig { }, ); - cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.chain_id = self.chain_spec.chain().id(); cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; // Disable block gas limit check @@ -72,7 +92,69 @@ impl ConfigureEvmEnv for BscEvmConfig { cfg_env.disable_block_gas_limit = true; cfg_env.handler_cfg.spec_id = spec_id; - cfg_env.handler_cfg.is_bsc = chain_spec.is_bsc(); + cfg_env.handler_cfg.is_bsc = self.chain_spec.is_bsc(); + } + + fn next_cfg_and_block_env( + &self, + parent: &Self::Header, + attributes: NextBlockEnvAttributes, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + // configure evm env based on parent block + let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); + + // ensure we're not missing any timestamp based hardforks + let spec_id = revm_spec_by_timestamp_after_shanghai(&self.chain_spec, attributes.timestamp); + + // if the parent block did not have excess blob gas (i.e. 
it was pre-cancun), but it is + // cancun now, we need to set the excess blob gas to the default value + let blob_excess_gas_and_price = parent + .next_block_excess_blob_gas() + .or_else(|| { + if spec_id == SpecId::CANCUN { + // default excess blob gas is zero + Some(0) + } else { + None + } + }) + .map(BlobExcessGasAndPrice::new); + + let mut basefee = parent.next_block_base_fee( + self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), + ); + + let mut gas_limit = U256::from(parent.gas_limit); + + // If we are on the London fork boundary, we need to multiply the parent's gas limit by the + // elasticity multiplier to get the new gas limit. + if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(parent.number + 1) { + let elasticity_multiplier = self + .chain_spec + .base_fee_params_at_timestamp(attributes.timestamp) + .elasticity_multiplier; + + // multiply the gas limit by the elasticity multiplier + gas_limit *= U256::from(elasticity_multiplier); + + // set the base fee to the initial base fee from the EIP-1559 spec + basefee = Some(EIP1559_INITIAL_BASE_FEE) + } + + let block_env = BlockEnv { + number: U256::from(parent.number + 1), + coinbase: attributes.suggested_fee_recipient, + timestamp: U256::from(attributes.timestamp), + difficulty: U256::ZERO, + prevrandao: Some(attributes.prev_randao), + gas_limit, + // calculate basefee based on parent block's gas usage + basefee: basefee.map(U256::from).unwrap_or_default(), + // calculate excess gas based on parent block's blob gas usage + blob_excess_gas_and_price, + }; + + (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) } } @@ -102,7 +184,11 @@ impl ConfigureEvm for BscEvmConfig { #[cfg(test)] mod tests { use super::*; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; + use reth_chainspec::Chain; + use reth_primitives::{ + revm_primitives::{BlockEnv, CfgEnv}, + Genesis, + }; use revm_primitives::SpecId; #[test] @@ -111,13 +197,19 @@ mod tests { let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); let header = Header::default(); - let chain_spec = ChainSpec::default(); let total_difficulty = U256::ZERO; + let chain_spec = ChainSpec::builder() + .chain(Chain::bsc_mainnet()) + .genesis(Genesis::default()) + .london_activated() + .paris_activated() + .shanghai_activated() + .build(); + BscEvmConfig::default().fill_cfg_and_block_env( &mut cfg_env, &mut block_env, - &chain_spec, &header, total_difficulty, ); diff --git a/crates/bsc/evm/src/patch_hertz.rs b/crates/bsc/evm/src/patch_hertz.rs index d375c90067..92b4b0e910 100644 --- a/crates/bsc/evm/src/patch_hertz.rs +++ b/crates/bsc/evm/src/patch_hertz.rs @@ -2,7 +2,7 @@ use crate::{execute::BscEvmExecutor, BscBlockExecutionError}; use lazy_static::lazy_static; use reth_errors::ProviderError; use reth_evm::ConfigureEvm; -use reth_primitives::{address, b256, Address, TransactionSigned, B256, U256}; +use reth_primitives::{address, b256, Address, Header, TransactionSigned, B256, U256}; use reth_revm::{db::states::StorageSlot, State}; use revm_primitives::db::Database; use std::{collections::HashMap, str::FromStr}; @@ -657,7 +657,7 @@ lazy_static! { impl BscEvmExecutor where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>,
 {
     pub(crate) fn patch_mainnet_before_tx(
         &self,
diff --git a/crates/bsc/evm/src/post_execution.rs b/crates/bsc/evm/src/post_execution.rs
index 274354f494..d5df63e516 100644
--- a/crates/bsc/evm/src/post_execution.rs
+++ b/crates/bsc/evm/src/post_execution.rs
@@ -31,7 +31,7 @@ pub(crate) struct PostExecutionInput {
 impl<EvmConfig, DB, P> BscBlockExecutor<EvmConfig, DB, P>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
     DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
     P: ParliaProvider,
 {
@@ -344,7 +344,7 @@ where
         let reward_to_system = block_reward >> SYSTEM_REWARD_PERCENT;
         if reward_to_system > 0 {
             self.transact_system_tx(
-                self.parlia().distribute_to_system(reward_to_system),
+                self.parlia().distribute_to_system(reward_to_system.try_into().unwrap()),
                 validator,
                 system_txs,
                 receipts,
@@ -357,7 +357,7 @@ where
         }
         self.transact_system_tx(
-            self.parlia().distribute_to_validator(validator, block_reward),
+            self.parlia().distribute_to_validator(validator, block_reward.try_into().unwrap()),
             validator,
             system_txs,
             receipts,
diff --git a/crates/bsc/evm/src/pre_execution.rs b/crates/bsc/evm/src/pre_execution.rs
index 0e005b6736..6ac44cd3af 100644
--- a/crates/bsc/evm/src/pre_execution.rs
+++ b/crates/bsc/evm/src/pre_execution.rs
@@ -20,7 +20,7 @@ const BLST_DST: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
 impl<EvmConfig, DB, P> BscBlockExecutor<EvmConfig, DB, P>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>
, DB: Database + std::fmt::Display>, P: ParliaProvider, { diff --git a/crates/bsc/node/src/node.rs b/crates/bsc/node/src/node.rs index 0ab69246ba..27af2188da 100644 --- a/crates/bsc/node/src/node.rs +++ b/crates/bsc/node/src/node.rs @@ -4,18 +4,22 @@ use crate::EthEngineTypes; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_bsc_consensus::Parlia; use reth_chainspec::ChainSpec; +use reth_ethereum_engine_primitives::{ + EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, +}; use reth_evm_bsc::{BscEvmConfig, BscExecutorProvider}; use reth_network::NetworkHandle; -use reth_node_api::{FullNodeComponents, NodeAddOns}; +use reth_node_api::{ConfigureEvm, FullNodeComponents, NodeAddOns}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, }, - node::{FullNodeTypes, NodeTypes}, - BuilderContext, Node, PayloadBuilderConfig, + node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, + BuilderContext, Node, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_primitives::Header; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -40,7 +44,12 @@ impl BscNode { BscConsensusBuilder, > where - Node: FullNodeTypes, + Node: FullNodeTypes>, + ::Engine: PayloadTypes< + BuiltPayload = EthBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = EthPayloadBuilderAttributes, + >, { ComponentsBuilder::default() .node_types::() @@ -54,10 +63,13 @@ impl BscNode { impl NodeTypes for BscNode { type Primitives = (); - type Engine = EthEngineTypes; type ChainSpec = ChainSpec; } +impl NodeTypesWithEngine for BscNode { + type Engine = EthEngineTypes; +} + /// Add-ons w.r.t. l1 bsc. #[derive(Debug, Clone)] pub struct BSCAddOns; @@ -66,9 +78,10 @@ impl NodeAddOns for BSCAddOns { type EthApi = EthApi; } -impl Node for BscNode +impl Node for BscNode where - N: FullNodeTypes, + Types: NodeTypesWithEngine, + N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< N, @@ -91,9 +104,10 @@ where #[non_exhaustive] pub struct BscExecutorBuilder; -impl ExecutorBuilder for BscExecutorBuilder +impl ExecutorBuilder for BscExecutorBuilder where - Node: FullNodeTypes, + Types: NodeTypesWithEngine, + Node: FullNodeTypes, { type EVM = BscEvmConfig; @@ -104,10 +118,10 @@ where ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); - let evm_config = BscEvmConfig::default(); + let evm_config = BscEvmConfig::new(ctx.chain_spec()); let executor = BscExecutorProvider::new( chain_spec, - evm_config, + evm_config.clone(), ctx.reth_config().parlia.clone(), ctx.provider().clone(), ); @@ -126,9 +140,10 @@ pub struct BscPoolBuilder { // TODO add options for txpool args } -impl PoolBuilder for BscPoolBuilder +impl PoolBuilder for BscPoolBuilder where - Node: FullNodeTypes, + Types: NodeTypesWithEngine, + Node: FullNodeTypes, { type Pool = EthTransactionPool; @@ -193,17 +208,27 @@ where #[non_exhaustive] pub struct BscPayloadBuilder; -impl PayloadServiceBuilder for BscPayloadBuilder -where - Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, -{ - async fn spawn_payload_service( +impl BscPayloadBuilder { + /// A helper method initializing [`PayloadBuilderService`] with the given EVM config. 
+ pub fn spawn( self, + evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default(); + ) -> eyre::Result> + where + Types: NodeTypesWithEngine, + Node: FullNodeTypes, + Evm: ConfigureEvm
, + Pool: TransactionPool + Unpin + 'static, + Types::Engine: PayloadTypes< + BuiltPayload = EthBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = EthPayloadBuilderAttributes, + >, + { + let payload_builder = + reth_ethereum_payload_builder::EthereumPayloadBuilder::new(evm_config); let conf = ctx.payload_builder_config(); let payload_job_config = BasicPayloadJobGeneratorConfig::default() @@ -229,6 +254,26 @@ where } } +impl PayloadServiceBuilder for BscPayloadBuilder +where + Types: NodeTypesWithEngine, + Node: FullNodeTypes, + Pool: TransactionPool + Unpin + 'static, + Types::Engine: PayloadTypes< + BuiltPayload = EthBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = EthPayloadBuilderAttributes, + >, +{ + async fn spawn_payload_service( + self, + ctx: &BuilderContext, + pool: Pool, + ) -> eyre::Result> { + self.spawn(BscEvmConfig::new(ctx.chain_spec()), ctx, pool) + } +} + /// A basic bsc payload service. #[derive(Debug, Default, Clone, Copy)] pub struct BscNetworkBuilder { @@ -259,7 +304,7 @@ pub struct BscConsensusBuilder; impl ConsensusBuilder for BscConsensusBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, { type Consensus = Parlia; diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 9f0706a745..078fe7d0c0 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -21,6 +21,9 @@ reth-primitives.workspace = true reth-storage-api.workspace = true reth-trie.workspace = true +# alloy +alloy-primitives.workspace = true + # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } tokio-stream = { workspace = true, features = ["sync"] } diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index 5a9725e25e..8e8c095a81 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -1,6 +1,7 @@ +use alloy_primitives::BlockNumber; use parking_lot::RwLock; use reth_chainspec::ChainInfo; -use reth_primitives::{BlockNumHash, BlockNumber, SealedHeader}; +use reth_primitives::{BlockNumHash, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 4f20cea769..3ce079b4f3 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,13 +4,14 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; +use alloy_primitives::{Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - Address, BlockNumHash, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TxHash, B256, + BlockNumHash, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -40,6 +41,16 @@ pub(crate) struct InMemoryStateMetrics { /// /// This tracks blocks and their state that haven't been persisted to disk yet but are part of the /// canonical chain that can be traced back to a canonical block on disk. 
+/// +/// # Locking behavior on state updates +/// +/// All update calls must be atomic, meaning that they must acquire all locks at once, before +/// modifying the state. This ensures that the internal state is always consistent. +/// Update functions always acquire the numbers write lock first, because lookups by number +/// first read the numbers map and then the blocks map. +/// By acquiring the numbers lock first, we ensure that read-only lookups don't deadlock updates. +/// This holds because only lookup-by-number functions need to acquire the numbers lock first to +/// get the block hash. #[derive(Debug, Default)] pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. @@ -92,7 +103,8 @@ impl InMemoryState { /// Returns the state for a given block number. pub(crate) fn state_by_number(&self, number: u64) -> Option> { - self.numbers.read().get(&number).and_then(|hash| self.blocks.read().get(hash).cloned()) + let hash = self.hash_by_number(number)?; + self.state_by_hash(hash) } /// Returns the hash for a specific block number @@ -102,11 +114,8 @@ impl InMemoryState { /// Returns the current chain head state. pub(crate) fn head_state(&self) -> Option> { - self.numbers - .read() - .iter() - .max_by_key(|(&number, _)| number) - .and_then(|(_, hash)| self.blocks.read().get(hash).cloned()) + let hash = *self.numbers.read().last_key_value()?.1; + self.state_by_hash(hash) } /// Returns the pending state corresponding to the current head plus one, @@ -138,10 +147,11 @@ impl CanonicalInMemoryStateInner { /// Clears all entries in the in memory state. fn clear(&self) { { - let mut blocks = self.in_memory_state.blocks.write(); + // acquire locks, starting with the numbers lock let mut numbers = self.in_memory_state.numbers.write(); - blocks.clear(); + let mut blocks = self.in_memory_state.blocks.write(); numbers.clear(); + blocks.clear(); self.in_memory_state.pending.send_modify(|p| { p.take(); }); @@ -239,7 +249,7 @@ impl CanonicalInMemoryState { I: IntoIterator, { { - // acquire all locks + // acquire locks, starting with the numbers lock let mut numbers = self.inner.in_memory_state.numbers.write(); let mut blocks = self.inner.in_memory_state.blocks.write(); @@ -288,10 +298,24 @@ impl CanonicalInMemoryState { /// /// This will update the links between blocks and remove all blocks that are [.. /// `persisted_height`]. - pub fn remove_persisted_blocks(&self, persisted_height: u64) { + pub fn remove_persisted_blocks(&self, persisted_num_hash: BlockNumHash) { + // if the persisted hash is not in the canonical in memory state, do nothing, because it + // means canonical blocks were not actually persisted. + // + // This can happen if the persistence task takes a long time while a reorg is happening.
{ - let mut blocks = self.inner.in_memory_state.blocks.write(); + if self.inner.in_memory_state.blocks.read().get(&persisted_num_hash.hash).is_none() { + // do nothing + return + } + } + + { + // acquire locks, starting with the numbers lock let mut numbers = self.inner.in_memory_state.numbers.write(); + let mut blocks = self.inner.in_memory_state.blocks.write(); + + let BlockNumHash { number: persisted_height, hash: _ } = persisted_num_hash; // clear all numbers numbers.clear(); @@ -807,16 +831,16 @@ impl NewCanonicalChain { mod tests { use super::*; use crate::test_utils::TestBlockBuilder; + use alloy_primitives::{BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{ - Account, BlockNumber, Bytecode, Bytes, Receipt, Requests, StorageKey, StorageValue, - }; + use reth_primitives::{Account, Bytecode, Receipt, Requests}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; - use reth_trie::{prefix_set::TriePrefixSetsMut, AccountProof, HashedStorage}; + use reth_trie::{AccountProof, HashedStorage, MultiProof, TrieInput}; + use std::collections::HashSet; fn create_mock_state( test_block_builder: &mut TestBlockBuilder, @@ -890,12 +914,7 @@ mod tests { Ok(B256::random()) } - fn state_root_from_nodes( - &self, - _nodes: TrieUpdates, - _post_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { Ok(B256::random()) } @@ -908,9 +927,7 @@ mod tests { fn state_root_from_nodes_with_updates( &self, - _nodes: TrieUpdates, - _post_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, + _input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { Ok((B256::random(), TrieUpdates::default())) } @@ -929,16 +946,24 @@ mod tests { impl StateProofProvider for MockStateProvider { fn proof( &self, - _hashed_state: HashedPostState, + _input: TrieInput, _address: Address, _slots: &[B256], ) -> ProviderResult { Ok(AccountProof::new(Address::random())) } + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + fn witness( &self, - _overlay: HashedPostState, + _input: TrieInput, _target: HashedPostState, ) -> ProviderResult> { Ok(HashMap::default()) diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 1782627b91..35315fb521 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,17 +1,18 @@ use super::ExecutedBlock; +use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_errors::ProviderResult; -use reth_primitives::{ - keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, StorageValue, B256, -}; +use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox, StateRootProvider, StorageRootProvider, }; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, +}; +use std::{ + collections::{HashMap, HashSet}, + sync::OnceLock, }; -use std::{collections::HashMap, sync::OnceLock}; /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical 
state provider for fallback lookups. @@ -45,13 +46,12 @@ impl MemoryOverlayStateProvider { /// Return lazy-loaded trie state aggregated from in-memory blocks. fn trie_state(&self) -> &MemoryOverlayTrieState { self.trie_state.get_or_init(|| { - let mut hashed_state = HashedPostState::default(); - let mut trie_nodes = TrieUpdates::default(); + let mut trie_state = MemoryOverlayTrieState::default(); for block in self.in_memory.iter().rev() { - hashed_state.extend_ref(block.hashed_state.as_ref()); - trie_nodes.extend_ref(block.trie.as_ref()); + trie_state.state.extend_ref(block.hashed_state.as_ref()); + trie_state.nodes.extend_ref(block.trie.as_ref()); } - MemoryOverlayTrieState { trie_nodes, hashed_state } + trie_state }) } } @@ -102,81 +102,73 @@ impl AccountReader for MemoryOverlayStateProvider { } impl StateRootProvider for MemoryOverlayStateProvider { - fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { - let prefix_sets = hashed_state.construct_prefix_sets(); - self.state_root_from_nodes(TrieUpdates::default(), hashed_state, prefix_sets) + fn state_root(&self, state: HashedPostState) -> ProviderResult { + self.state_root_from_nodes(TrieInput::from_state(state)) } - fn state_root_from_nodes( - &self, - nodes: TrieUpdates, - state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { - let MemoryOverlayTrieState { mut trie_nodes, mut hashed_state } = self.trie_state().clone(); - trie_nodes.extend(nodes); - hashed_state.extend(state); - self.historical.state_root_from_nodes(trie_nodes, hashed_state, prefix_sets) + fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes(input) } fn state_root_with_updates( &self, - hashed_state: HashedPostState, + state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - let prefix_sets = hashed_state.construct_prefix_sets(); - self.state_root_from_nodes_with_updates(TrieUpdates::default(), hashed_state, prefix_sets) + self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) } fn state_root_from_nodes_with_updates( &self, - nodes: TrieUpdates, - state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - let MemoryOverlayTrieState { mut trie_nodes, mut hashed_state } = self.trie_state().clone(); - trie_nodes.extend(nodes); - hashed_state.extend(state); - self.historical.state_root_from_nodes_with_updates(trie_nodes, hashed_state, prefix_sets) + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes_with_updates(input) } } impl StorageRootProvider for MemoryOverlayStateProvider { // TODO: Currently this does not reuse available in-memory trie nodes. fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let mut hashed_storage = self - .trie_state() - .hashed_state - .storages - .get(&keccak256(address)) - .cloned() - .unwrap_or_default(); + let mut hashed_storage = + self.trie_state().state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); hashed_storage.extend(&storage); self.historical.storage_root(address, hashed_storage) } } impl StateProofProvider for MemoryOverlayStateProvider { - // TODO: Currently this does not reuse available in-memory trie nodes. 
fn proof( &self, - state: HashedPostState, + mut input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - let mut hashed_state = self.trie_state().hashed_state.clone(); - hashed_state.extend(state); - self.historical.proof(hashed_state, address, slots) + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.proof(input, address, slots) + } + + fn multiproof( + &self, + mut input: TrieInput, + targets: HashMap>, + ) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.multiproof(input, targets) } - // TODO: Currently this does not reuse available in-memory trie nodes. fn witness( &self, - overlay: HashedPostState, + mut input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - let mut hashed_state = self.trie_state().hashed_state.clone(); - hashed_state.extend(overlay); - self.historical.witness(hashed_state, target) + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.witness(input, target) } } @@ -207,10 +199,10 @@ impl StateProvider for MemoryOverlayStateProvider { } /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. -#[derive(Clone, Debug)] +#[derive(Clone, Default, Debug)] pub(crate) struct MemoryOverlayTrieState { /// The collection of aggregated in-memory trie updates. - pub(crate) trie_nodes: TrieUpdates, + pub(crate) nodes: TrieUpdates, /// The collection of hashed state from in-memory blocks. - pub(crate) hashed_state: HashedPostState, + pub(crate) state: HashedPostState, } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index d9bf7b1572..b1df3befde 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,16 +2,17 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; -use reth_chainspec::{ChainSpec, EthereumHardfork}; +use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH}, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - Address, BlockNumber, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, B256, U256, + Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, Signature, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -72,7 +73,7 @@ impl TestBlockBuilder { /// Gas cost of a single transaction generated by the block builder. pub fn single_tx_cost() -> U256 { - U256::from(EIP1559_INITIAL_BASE_FEE * 21_000) + U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) } /// Generates a random [`SealedBlockWithSenders`]. 
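The test-utils hunks here replace every hard-coded `21_000` with the newly exported `MIN_TRANSACTION_GAS` constant. A minimal sketch of the arithmetic these helpers rely on, with both reth constants inlined at their well-known mainnet values (names match the imports above; the sketch itself is not part of the patch):

```rust
// Inlined stand-ins for the reth constants used in the hunks above and below:
// EIP1559_INITIAL_BASE_FEE is 1 gwei, MIN_TRANSACTION_GAS is the gas cost of
// a plain value transfer (a transaction that creates no contract).
const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000;
const MIN_TRANSACTION_GAS: u64 = 21_000;

/// Base-fee cost of one empty transfer, as in `TestBlockBuilder::single_tx_cost`.
fn single_tx_cost() -> u128 {
    EIP1559_INITIAL_BASE_FEE as u128 * MIN_TRANSACTION_GAS as u128
}

fn main() {
    // 1 gwei * 21_000 gas = 2.1e13 wei per transfer.
    assert_eq!(single_tx_cost(), 21_000_000_000_000);
    // Receipts in the builder accumulate gas as (idx + 1) * MIN_TRANSACTION_GAS,
    // so a block of three such transfers reports 21k / 42k / 63k cumulative gas.
    let cumulative: Vec<u64> = (0u64..3).map(|idx| (idx + 1) * MIN_TRANSACTION_GAS).collect();
    assert_eq!(cumulative, vec![21_000, 42_000, 63_000]);
}
```

Naming the constant keeps `single_tx_cost`, the per-receipt cumulative gas, and the header's `gas_used` derived from one definition instead of three copies of a magic number.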
@@ -87,7 +88,7 @@ impl TestBlockBuilder { let tx = Transaction::Eip1559(TxEip1559 { chain_id: self.chain_spec.chain.id(), nonce, - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, to: Address::random().into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, max_priority_fee_per_gas: 1, @@ -125,7 +126,7 @@ impl TestBlockBuilder { Receipt { tx_type: tx.tx_type(), success: true, - cumulative_gas_used: (idx as u64 + 1) * 21_000, + cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, ..Default::default() } .with_bloom() @@ -137,7 +138,7 @@ impl TestBlockBuilder { let header = Header { number, parent_hash, - gas_used: transactions.len() as u64 * 21_000, + gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), @@ -262,7 +263,7 @@ impl TestBlockBuilder { .map(|(idx, tx)| Receipt { tx_type: tx.tx_type(), success: true, - cumulative_gas_used: (idx as u64 + 1) * 21_000, + cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, ..Default::default() }) .collect::>(); diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index f80dd702ae..dbf3a00be2 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,8 +1,9 @@ use crate::ChainSpec; use alloy_chains::Chain; +use core::fmt::Debug; /// Trait representing type configuring a chain spec. -pub trait EthChainSpec: Send + Sync + Unpin + 'static { +pub trait EthChainSpec: Send + Sync + Unpin + Debug + 'static { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; diff --git a/crates/chainspec/src/constants.rs b/crates/chainspec/src/constants.rs index 7026e76ff4..2e22b2299a 100644 --- a/crates/chainspec/src/constants.rs +++ b/crates/chainspec/src/constants.rs @@ -1,6 +1,8 @@ use crate::spec::DepositContract; use alloy_primitives::{address, b256}; +/// Gas per transaction not creating a contract. +pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; /// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( address!("00000000219ab540356cbb839cbe05303d7705fa"), diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 4badac6bce..424b2b77c2 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -9,11 +9,11 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; /// Chain specific constants pub(crate) mod constants; +pub use constants::MIN_TRANSACTION_GAS; mod api; /// The chain info module. @@ -34,6 +34,13 @@ pub use spec::{ DepositContract, ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA, }; +/// Simple utility to create a `OnceCell` with a value set. 
+pub fn once_cell_set(value: T) -> once_cell::sync::OnceCell { + let once = once_cell::sync::OnceCell::new(); + let _ = once.set(value); + once +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 7e019f4dbd..8652a6c976 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1,18 +1,11 @@ -pub use alloy_eips::eip1559::BaseFeeParams; - -#[cfg(not(feature = "std"))] +use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -#[cfg(feature = "std")] -use std::sync::Arc; - use alloy_chains::{Chain, ChainKind, NamedChain}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; -use once_cell::sync::Lazy; -#[cfg(feature = "optimism")] -use reth_ethereum_forks::OptimismHardfork; +use once_cell::sync::{Lazy, OnceCell}; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Head, DEV_HARDFORKS, @@ -31,7 +24,7 @@ use reth_primitives_traits::{ }; use reth_trie_common::root::state_root_ref_unhashed; -use crate::{constants::MAINNET_DEPOSIT_CONTRACT, EthChainSpec}; +pub use alloy_eips::eip1559::BaseFeeParams; /// The Ethereum mainnet spec pub static MAINNET: Lazy> = Lazy::new(|| { @@ -39,7 +32,8 @@ pub static MAINNET: Lazy> = Lazy::new(|| { chain: Chain::mainnet(), genesis: serde_json::from_str(include_str!("../res/genesis/mainnet.json")) .expect("Can't deserialize Mainnet genesis json"), - genesis_hash: Some(MAINNET_GENESIS_HASH), + genesis_hash: once_cell_set(MAINNET_GENESIS_HASH), + genesis_header: Default::default(), // paris_block_and_final_difficulty: Some(( 15537394, @@ -66,7 +60,8 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { chain: Chain::sepolia(), genesis: serde_json::from_str(include_str!("../res/genesis/sepolia.json")) .expect("Can't deserialize Sepolia genesis json"), - genesis_hash: Some(SEPOLIA_GENESIS_HASH), + genesis_hash: once_cell_set(SEPOLIA_GENESIS_HASH), + genesis_header: Default::default(), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), hardforks: EthereumHardfork::sepolia().into(), @@ -90,7 +85,8 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { chain: Chain::holesky(), genesis: serde_json::from_str(include_str!("../res/genesis/holesky.json")) .expect("Can't deserialize Holesky genesis json"), - genesis_hash: Some(HOLESKY_GENESIS_HASH), + genesis_hash: once_cell_set(HOLESKY_GENESIS_HASH), + genesis_header: Default::default(), paris_block_and_final_difficulty: Some((0, U256::from(1))), hardforks: EthereumHardfork::holesky().into(), deposit_contract: Some(DepositContract::new( @@ -115,7 +111,7 @@ pub static DEV: Lazy> = Lazy::new(|| { chain: Chain::dev(), genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) .expect("Can't deserialize Dev testnet genesis json"), - genesis_hash: Some(DEV_GENESIS_HASH), + genesis_hash: once_cell_set(DEV_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: DEV_HARDFORKS.clone(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), @@ -179,14 +175,20 @@ pub struct ChainSpec { /// The chain ID pub chain: Chain, + /// The genesis block. + pub genesis: Genesis, + /// The hash of the genesis block. /// - /// This acts as a small cache for known chains. 
If the chain is known, then the genesis hash - /// is also known ahead of time, and this will be `Some`. - pub genesis_hash: Option, + /// This is either stored at construction time if it is known using [`once_cell_set`], or + /// computed once on the first access. + pub genesis_hash: OnceCell, - /// The genesis block - pub genesis: Genesis, + /// The header corresponding to the genesis block. + /// + /// This is either stored at construction time if it is known using [`once_cell_set`], or + /// computed once on the first access. + pub genesis_header: OnceCell
, /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at /// this block. @@ -214,6 +216,7 @@ impl Default for ChainSpec { chain: Default::default(), genesis_hash: Default::default(), genesis: Default::default(), + genesis_header: Default::default(), paris_block_and_final_difficulty: Default::default(), hardforks: Default::default(), deposit_contract: Default::default(), @@ -266,7 +269,8 @@ impl ChainSpec { #[inline] #[cfg(feature = "optimism")] pub fn is_optimism(&self) -> bool { - self.chain.is_optimism() || self.hardforks.get(OptimismHardfork::Bedrock).is_some() + self.chain.is_optimism() || + self.hardforks.get(reth_ethereum_forks::OptimismHardfork::Bedrock).is_some() } /// Returns `true` if this chain contains Optimism configuration. @@ -290,7 +294,11 @@ impl ChainSpec { } /// Get the header for the genesis block. - pub fn genesis_header(&self) -> Header { + pub fn genesis_header(&self) -> &Header { + self.genesis_header.get_or_init(|| self.make_genesis_header()) + } + + fn make_genesis_header(&self) -> Header { // If London is activated at genesis, we set the initial base fee as per EIP-1559. let base_fee_per_gas = self.initial_base_fee(); @@ -342,7 +350,7 @@ impl ChainSpec { /// Get the sealed header for the genesis block. pub fn sealed_genesis_header(&self) -> SealedHeader { - SealedHeader::new(self.genesis_header(), self.genesis_hash()) + SealedHeader::new(self.genesis_header().clone(), self.genesis_hash()) } /// Get the initial base fee of the genesis block. @@ -395,7 +403,7 @@ impl ChainSpec { /// Get the hash of the genesis block. pub fn genesis_hash(&self) -> B256 { - self.genesis_hash.unwrap_or_else(|| self.genesis_header().hash_slow()) + *self.genesis_hash.get_or_init(|| self.genesis_header().hash_slow()) } /// Get the timestamp of the genesis block. @@ -542,10 +550,11 @@ impl ChainSpec { ForkCondition::Timestamp(timestamp) => { // to satisfy every timestamp ForkCondition, we find the last ForkCondition::Block // if one exists, and include its block_num in the returned Head - if let Some(last_block_num) = self.last_block_fork_before_merge_or_timestamp() { - return Head { timestamp, number: last_block_num, ..Default::default() } + Head { + timestamp, + number: self.last_block_fork_before_merge_or_timestamp().unwrap_or_default(), + ..Default::default() } - Head { timestamp, ..Default::default() } } ForkCondition::TTD { total_difficulty, .. 
} => { Head { total_difficulty, ..Default::default() } @@ -619,114 +628,204 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { + #[cfg(not(feature = "optimism"))] + { + into_ethereum_chain_spec(genesis) + } + #[cfg(feature = "optimism")] - let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); - #[cfg(feature = "optimism")] - let genesis_info = - optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); - - // Block-based hardforks - let hardfork_opts = [ - (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), - (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), - (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), - (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), - (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), - (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), - (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), - (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), - (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), - (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), - (EthereumHardfork::London.boxed(), genesis.config.london_block), - (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), - (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - #[cfg(feature = "optimism")] - (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), - ]; - let mut hardforks = hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) - .collect::>(); - - // Paris - let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); - - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; + { + into_optimism_chain_spec(genesis) + } + } +} - // Time-based hardforks - let time_hardfork_opts = [ - (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), - (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), - (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - #[cfg(feature = "optimism")] - (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), - #[cfg(feature = "optimism")] - (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), - #[cfg(feature = "optimism")] - (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), - #[cfg(feature = "optimism")] - (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), - #[cfg(feature = "optimism")] - (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), - ]; +/// Convert the given [`Genesis`] into an Ethereum [`ChainSpec`]. 
+#[cfg(not(feature = "optimism"))] +fn into_ethereum_chain_spec(genesis: Genesis) -> ChainSpec { + // Block-based hardforks + let hardfork_opts = [ + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), + ]; + let mut hardforks = hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::>(); + + // Paris + let paris_block_and_final_difficulty = + if let Some(ttd) = genesis.config.terminal_total_difficulty { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); - let time_hardforks = time_hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| { - opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) - }) - .collect::>(); + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; - hardforks.extend(time_hardforks); + // Time-based hardforks + let time_hardfork_opts = [ + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + ]; - // Uses ethereum or optimism main chains to find proper order - #[cfg(not(feature = "optimism"))] - let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); - #[cfg(not(feature = "optimism"))] - let mainnet_order = mainnet_hardforks.forks_iter(); - #[cfg(feature = "optimism")] - let mainnet_hardforks = OptimismHardfork::op_mainnet(); - #[cfg(feature = "optimism")] - let mainnet_order = mainnet_hardforks.forks_iter(); + let mut time_hardforks = time_hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))) + .collect::>(); - let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); - for (hardfork, _) in mainnet_order { - if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { - ordered_hardforks.push(hardforks[pos].clone()); - } + hardforks.append(&mut time_hardforks); + + // Ordered Hardforks + let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(hardforks.remove(pos)); } + } - // NOTE: in full node, we prune all receipts except the deposit contract's. We do not - // have the deployment block in the genesis file, so we use block zero. 
We use the same - // deposit topic as the mainnet contract if we have the deposit contract address in the - // genesis json. - let deposit_contract = genesis.config.deposit_contract_address.map(|address| { - DepositContract { address, block: 0, topic: MAINNET_DEPOSIT_CONTRACT.topic } - }); + // append the remaining unknown hardforks to ensure we don't filter any out + ordered_hardforks.append(&mut hardforks); - Self { - chain: genesis.config.chain_id.into(), - genesis, - genesis_hash: None, - hardforks: ChainHardforks::new(hardforks), - paris_block_and_final_difficulty, - deposit_contract, - #[cfg(feature = "optimism")] - base_fee_params: optimism_genesis_info.base_fee_params, - ..Default::default() + // NOTE: in full node, we prune all receipts except the deposit contract's. We do not + // have the deployment block in the genesis file, so we use block zero. We use the same + // deposit topic as the mainnet contract if we have the deposit contract address in the + // genesis json. + let deposit_contract = genesis.config.deposit_contract_address.map(|address| DepositContract { + address, + block: 0, + topic: MAINNET_DEPOSIT_CONTRACT.topic, + }); + + ChainSpec { + chain: genesis.config.chain_id.into(), + genesis, + genesis_hash: OnceCell::new(), + hardforks: ChainHardforks::new(ordered_hardforks), + paris_block_and_final_difficulty, + deposit_contract, + ..Default::default() + } +} + +#[cfg(feature = "optimism")] +/// Convert the given [`Genesis`] into an Optimism [`ChainSpec`]. +fn into_optimism_chain_spec(genesis: Genesis) -> ChainSpec { + use reth_ethereum_forks::OptimismHardfork; + let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + let genesis_info = optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); + + // Block-based hardforks + let hardfork_opts = [ + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), + (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), + ]; + let mut block_hardforks = hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::>(); + + // Paris + let paris_block_and_final_difficulty = + if let Some(ttd) = genesis.config.terminal_total_difficulty { + block_hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); + + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; + + // Time-based hardforks + let time_hardfork_opts = [ + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + 
(EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), + (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), + (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), + (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), + (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), + ]; + + let mut time_hardforks = time_hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))) + .collect::>(); + + block_hardforks.append(&mut time_hardforks); + + // Ordered Hardforks + let mainnet_hardforks = OptimismHardfork::op_mainnet(); + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = block_hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(block_hardforks.remove(pos)); } } + + // append the remaining unknown hardforks to ensure we don't filter any out + ordered_hardforks.append(&mut block_hardforks); + + // NOTE: in full node, we prune all receipts except the deposit contract's. We do not + // have the deployment block in the genesis file, so we use block zero. We use the same + // deposit topic as the mainnet contract if we have the deposit contract address in the + // genesis json. + let deposit_contract = genesis.config.deposit_contract_address.map(|address| DepositContract { + address, + block: 0, + topic: MAINNET_DEPOSIT_CONTRACT.topic, + }); + + ChainSpec { + chain: genesis.config.chain_id.into(), + genesis, + genesis_hash: OnceCell::new(), + hardforks: ChainHardforks::new(ordered_hardforks), + paris_block_and_final_difficulty, + deposit_contract, + base_fee_params: optimism_genesis_info.base_fee_params, + ..Default::default() + } } /// A trait for reading the current [`ChainSpec`]. 
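The `genesis_hash` and `genesis_header` fields introduced above turn an `Option` into a `OnceCell` that is either seeded at construction via `once_cell_set` for built-in chains or computed on first access for custom ones. A standalone sketch of that set-or-compute pattern, assuming the `once_cell` crate; `Spec` and its `u64` hash are illustrative stand-ins for the real `ChainSpec` types:

```rust
use once_cell::sync::OnceCell;

/// Mirror of the `once_cell_set` helper above: a cell created pre-initialized,
/// so a later `get_or_init` never runs its closure.
fn once_cell_set<T>(value: T) -> OnceCell<T> {
    let cell = OnceCell::new();
    let _ = cell.set(value);
    cell
}

/// Illustrative stand-in for `ChainSpec`; the hash type is simplified to `u64`.
struct Spec {
    genesis_hash: OnceCell<u64>,
}

impl Spec {
    /// Like `ChainSpec::genesis_hash` after this diff: compute once, then cache.
    fn genesis_hash(&self) -> u64 {
        // The closure stands in for the expensive `genesis_header().hash_slow()`.
        *self.genesis_hash.get_or_init(|| 42)
    }
}

fn main() {
    // Built-in chains store the known hash at construction time ...
    let known = Spec { genesis_hash: once_cell_set(7) };
    assert_eq!(known.genesis_hash(), 7);
    // ... while custom chains compute and cache it on first use.
    let custom = Spec { genesis_hash: OnceCell::new() };
    assert_eq!(custom.genesis_hash(), 42);
}
```

Compared to the old `Option` with `unwrap_or_else` on every call, the cell hashes the genesis header at most once and serves cached reads afterwards.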
@@ -897,7 +996,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn bedrock_activated(mut self) -> Self { self = self.paris_activated(); - self.hardforks.insert(OptimismHardfork::Bedrock, ForkCondition::Block(0)); + self.hardforks.insert(crate::OptimismHardfork::Bedrock, ForkCondition::Block(0)); self } @@ -905,7 +1004,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn regolith_activated(mut self) -> Self { self = self.bedrock_activated(); - self.hardforks.insert(OptimismHardfork::Regolith, ForkCondition::Timestamp(0)); + self.hardforks.insert(crate::OptimismHardfork::Regolith, ForkCondition::Timestamp(0)); self } @@ -915,7 +1014,7 @@ impl ChainSpecBuilder { self = self.regolith_activated(); // Canyon also activates changes from L1's Shanghai hardfork self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); - self.hardforks.insert(OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); + self.hardforks.insert(crate::OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); self } @@ -924,7 +1023,7 @@ impl ChainSpecBuilder { pub fn ecotone_activated(mut self) -> Self { self = self.canyon_activated(); self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); - self.hardforks.insert(OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); + self.hardforks.insert(crate::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); self } @@ -932,7 +1031,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn fjord_activated(mut self) -> Self { self = self.ecotone_activated(); - self.hardforks.insert(OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); + self.hardforks.insert(crate::OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); self } @@ -940,7 +1039,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn granite_activated(mut self) -> Self { self = self.fjord_activated(); - self.hardforks.insert(OptimismHardfork::Granite, ForkCondition::Timestamp(0)); + self.hardforks.insert(crate::OptimismHardfork::Granite, ForkCondition::Timestamp(0)); self } @@ -963,7 +1062,7 @@ impl ChainSpecBuilder { ChainSpec { chain: self.chain.expect("The chain is required"), genesis: self.genesis.expect("The genesis is required"), - genesis_hash: None, + genesis_hash: OnceCell::new(), hardforks: self.hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -983,7 +1082,7 @@ impl From<&Arc> for ChainSpecBuilder { } /// `PoS` deposit contract details. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct DepositContract { /// Deposit Contract Address pub address: Address, @@ -1035,7 +1134,7 @@ impl OptimismGenesisInfo { BaseFeeParams::new(denominator as u128, elasticity as u128), ), ( - OptimismHardfork::Canyon.boxed(), + reth_ethereum_forks::OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), ), ] @@ -1429,7 +1528,7 @@ Post-merge hard forks (timestamp based): } #[test] - fn mainnet_forkids() { + fn mainnet_fork_ids() { test_fork_ids( &MAINNET, &[ @@ -1505,7 +1604,7 @@ Post-merge hard forks (timestamp based): } #[test] - fn holesky_forkids() { + fn holesky_fork_ids() { test_fork_ids( &HOLESKY, &[ @@ -1543,7 +1642,7 @@ Post-merge hard forks (timestamp based): } #[test] - fn sepolia_forkids() { + fn sepolia_fork_ids() { test_fork_ids( &SEPOLIA, &[ @@ -1583,7 +1682,7 @@ Post-merge hard forks (timestamp based): } #[test] - fn dev_forkids() { + fn dev_fork_ids() { test_fork_ids( &DEV, &[( @@ -1945,7 +2044,7 @@ Post-merge hard forks (timestamp based): assert_eq!(&alloy_rlp::encode(TrieAccount::from(account.clone())), expected_rlp); } - assert_eq!(chainspec.genesis_hash, None); + assert_eq!(chainspec.genesis_hash.get(), None); let expected_state_root: B256 = hex!("078dc6061b1d8eaa8493384b59c9c65ceb917201221d08b80c4de6770b6ec7e7").into(); assert_eq!(chainspec.genesis_header().state_root, expected_state_root); @@ -2020,7 +2119,7 @@ Post-merge hard forks (timestamp based): let genesis = serde_json::from_str::(hive_json).unwrap(); let chainspec: ChainSpec = genesis.into(); - assert_eq!(chainspec.genesis_hash, None); + assert_eq!(chainspec.genesis_hash.get(), None); assert_eq!(chainspec.chain, Chain::from_named(NamedChain::Optimism)); let expected_state_root: B256 = hex!("9a6049ac535e3dc7436c189eaa81c73f35abd7f282ab67c32944ff0301d63360").into(); @@ -2256,7 +2355,7 @@ Post-merge hard forks (timestamp based): .genesis(genesis) .cancun_activated() .build(); - let mut header = default_chainspec.genesis_header(); + let mut header = default_chainspec.genesis_header().clone(); // set the state root to the same as in the hive test the hash was pulled from header.state_root = @@ -2339,7 +2438,7 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec { chain: Chain::mainnet(), genesis: Genesis::default(), - genesis_hash: None, + genesis_hash: OnceCell::new(), hardforks: ChainHardforks::new(vec![( EthereumHardfork::Frontier.boxed(), ForkCondition::Never, @@ -2357,7 +2456,7 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec { chain: Chain::mainnet(), genesis: Genesis::default(), - genesis_hash: None, + genesis_hash: OnceCell::new(), hardforks: ChainHardforks::new(vec![( EthereumHardfork::Shanghai.boxed(), ForkCondition::Never, @@ -2377,4 +2476,139 @@ Post-merge hard forks (timestamp based): MAINNET.latest_fork_id() ) } + + #[test] + #[cfg(not(feature = "optimism"))] + fn test_fork_order_ethereum_mainnet() { + let genesis = Genesis { + config: ChainConfig { + chain_id: 0, + homestead_block: Some(0), + dao_fork_block: Some(0), + dao_fork_support: false, + eip150_block: Some(0), + eip155_block: Some(0), + eip158_block: Some(0), + byzantium_block: Some(0), + constantinople_block: Some(0), + petersburg_block: Some(0), + istanbul_block: Some(0), + muir_glacier_block: Some(0), + berlin_block: Some(0), + london_block: Some(0), + arrow_glacier_block: Some(0), + gray_glacier_block: Some(0), + merge_netsplit_block: Some(0), + shanghai_time: Some(0), + 
cancun_time: Some(0), + terminal_total_difficulty: Some(U256::ZERO), + ..Default::default() + }, + ..Default::default() + }; + + let chain_spec = into_ethereum_chain_spec(genesis); + + let hardforks: Vec<_> = chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); + let expected_hardforks = vec![ + EthereumHardfork::Homestead.boxed(), + EthereumHardfork::Dao.boxed(), + EthereumHardfork::Tangerine.boxed(), + EthereumHardfork::SpuriousDragon.boxed(), + EthereumHardfork::Byzantium.boxed(), + EthereumHardfork::Constantinople.boxed(), + EthereumHardfork::Petersburg.boxed(), + EthereumHardfork::Istanbul.boxed(), + EthereumHardfork::MuirGlacier.boxed(), + EthereumHardfork::Berlin.boxed(), + EthereumHardfork::London.boxed(), + EthereumHardfork::ArrowGlacier.boxed(), + EthereumHardfork::GrayGlacier.boxed(), + EthereumHardfork::Paris.boxed(), + EthereumHardfork::Shanghai.boxed(), + EthereumHardfork::Cancun.boxed(), + ]; + + assert!(expected_hardforks + .iter() + .zip(hardforks.iter()) + .all(|(expected, actual)| &**expected == *actual)); + assert_eq!(expected_hardforks.len(), hardforks.len()); + } + + #[test] + #[cfg(feature = "optimism")] + fn test_fork_order_optimism_mainnet() { + use crate::OptimismHardfork; + + let genesis = Genesis { + config: ChainConfig { + chain_id: 0, + homestead_block: Some(0), + dao_fork_block: Some(0), + dao_fork_support: false, + eip150_block: Some(0), + eip155_block: Some(0), + eip158_block: Some(0), + byzantium_block: Some(0), + constantinople_block: Some(0), + petersburg_block: Some(0), + istanbul_block: Some(0), + muir_glacier_block: Some(0), + berlin_block: Some(0), + london_block: Some(0), + arrow_glacier_block: Some(0), + gray_glacier_block: Some(0), + merge_netsplit_block: Some(0), + shanghai_time: Some(0), + cancun_time: Some(0), + terminal_total_difficulty: Some(U256::ZERO), + extra_fields: [ + (String::from("bedrockBlock"), 0.into()), + (String::from("regolithTime"), 0.into()), + (String::from("canyonTime"), 0.into()), + (String::from("ecotoneTime"), 0.into()), + (String::from("fjordTime"), 0.into()), + (String::from("graniteTime"), 0.into()), + ] + .into_iter() + .collect(), + ..Default::default() + }, + ..Default::default() + }; + + let chain_spec: ChainSpec = into_optimism_chain_spec(genesis); + + let hardforks: Vec<_> = chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); + let expected_hardforks = vec![ + EthereumHardfork::Homestead.boxed(), + EthereumHardfork::Tangerine.boxed(), + EthereumHardfork::SpuriousDragon.boxed(), + EthereumHardfork::Byzantium.boxed(), + EthereumHardfork::Constantinople.boxed(), + EthereumHardfork::Petersburg.boxed(), + EthereumHardfork::Istanbul.boxed(), + EthereumHardfork::MuirGlacier.boxed(), + EthereumHardfork::Berlin.boxed(), + EthereumHardfork::London.boxed(), + EthereumHardfork::ArrowGlacier.boxed(), + EthereumHardfork::GrayGlacier.boxed(), + EthereumHardfork::Paris.boxed(), + OptimismHardfork::Bedrock.boxed(), + OptimismHardfork::Regolith.boxed(), + EthereumHardfork::Shanghai.boxed(), + OptimismHardfork::Canyon.boxed(), + EthereumHardfork::Cancun.boxed(), + OptimismHardfork::Ecotone.boxed(), + OptimismHardfork::Fjord.boxed(), + OptimismHardfork::Granite.boxed(), + ]; + + assert!(expected_hardforks + .iter() + .zip(hardforks.iter()) + .all(|(expected, actual)| &**expected == *actual)); + assert_eq!(expected_hardforks.len(), hardforks.len()); + } } diff --git a/crates/cli/cli/src/chainspec.rs b/crates/cli/cli/src/chainspec.rs index f8caca7796..63705bd28f 100644 --- a/crates/cli/cli/src/chainspec.rs +++ 
b/crates/cli/cli/src/chainspec.rs @@ -2,12 +2,40 @@ use std::sync::Arc; use clap::builder::TypedValueParser; +#[derive(Debug, Clone)] +struct Parser(std::marker::PhantomData); + +impl TypedValueParser for Parser { + type Value = Arc; + + fn parse_ref( + &self, + _cmd: &clap::Command, + arg: Option<&clap::Arg>, + value: &std::ffi::OsStr, + ) -> Result { + let val = + value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; + C::parse(val).map_err(|err| { + let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned()); + let possible_values = C::SUPPORTED_CHAINS.join(","); + let msg = format!( + "Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]" + ); + clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg) + }) + } +} + /// Trait for parsing chain specifications. /// /// This trait extends [`clap::builder::TypedValueParser`] to provide a parser for chain /// specifications. Implementers of this trait must provide a list of supported chains and a /// function to parse a given string into a chain spec. -pub trait ChainSpecParser: TypedValueParser> + Default { +pub trait ChainSpecParser: Clone + Send + Sync + 'static { + /// The chain specification type. + type ChainSpec: std::fmt::Debug + Send + Sync; + /// List of supported chains. const SUPPORTED_CHAINS: &'static [&'static str]; @@ -21,5 +49,15 @@ pub trait ChainSpecParser: TypedValueParser> + /// /// This function will return an error if the input string cannot be parsed into a valid /// chain spec. - fn parse(s: &str) -> eyre::Result>; + fn parse(s: &str) -> eyre::Result>; + + /// Produces a [`TypedValueParser`] for this chain spec parser. + fn parser() -> impl TypedValueParser> { + Parser(std::marker::PhantomData::) + } + + /// Produces a help message for the chain spec argument. + fn help_message() -> String { + format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", Self::SUPPORTED_CHAINS.join(", ")) + } } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index b0e4bb750c..dd300229ff 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -10,8 +10,10 @@ repository.workspace = true [lints] [dependencies] +# reth reth-beacon-consensus.workspace = true reth-chainspec.workspace = true +reth-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true reth-config.workspace = true @@ -36,11 +38,15 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages.workspace = true -reth-static-file-types.workspace = true +reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } +# ethereum +alloy-eips.workspace = true +alloy-primitives.workspace = true + itertools.workspace = true futures.workspace = true tokio.workspace = true diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index b97a3a4841..b720ce95f4 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -1,21 +1,20 @@ //! 
Contains common `reth` arguments +use alloy_primitives::B256; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; +use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ - args::{ - utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DatadirArgs, PerformanceOptimizationArgs, - }, + args::{DatabaseArgs, DatadirArgs, PerformanceOptimizationArgs}, dirs::{ChainPath, DataDirPath}, }; -use reth_primitives::B256; use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; @@ -25,7 +24,7 @@ use tracing::{debug, info, warn}; /// Struct to hold config and datadir paths #[derive(Debug, Parser)] -pub struct EnvironmentArgs { +pub struct EnvironmentArgs { /// Parameters for datadir configuration #[command(flatten)] pub datadir: DatadirArgs, @@ -40,11 +39,11 @@ pub struct EnvironmentArgs { #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = chain_value_parser + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser() )] - pub chain: Arc, + pub chain: Arc, /// All database related arguments #[command(flatten)] pub db: DatabaseArgs, @@ -55,10 +54,13 @@ pub struct EnvironmentArgs { pub performance_optimization: PerformanceOptimizationArgs, } -impl EnvironmentArgs { +impl> EnvironmentArgs { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. - pub fn init(&self, access: AccessRights) -> eyre::Result { + pub fn init>( + &self, + access: AccessRights, + ) -> eyre::Result> { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); @@ -89,14 +91,14 @@ impl EnvironmentArgs { ), AccessRights::RO => ( Arc::new(open_db_read_only(&db_path, self.db.database_args())?), - StaticFileProvider::read_only(sf_path)?, + StaticFileProvider::read_only(sf_path, false)?, ), }; let provider_factory = self.create_provider_factory(&config, db, sfp)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); - init_genesis(provider_factory.clone())?; + init_genesis(&provider_factory)?; } Ok(Environment { config, provider_factory, data_dir }) @@ -107,17 +109,21 @@ impl EnvironmentArgs { /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out a warning, advising the user to restart the /// node to heal.
- fn create_provider_factory( + fn create_provider_factory>( &self, config: &Config, db: Arc, static_file_provider: StaticFileProvider, - ) -> eyre::Result>> { + ) -> eyre::Result>>> { let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); - let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider) - .with_prune_modes(prune_modes.clone()); + let factory = ProviderFactory::>>::new( + db, + self.chain.clone(), + static_file_provider, + ) + .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. if let Some(unwind_target) = factory @@ -138,7 +144,7 @@ impl EnvironmentArgs { let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); // Builds and executes an unwind-only pipeline - let mut pipeline = Pipeline::builder() + let mut pipeline = Pipeline::>>::builder() .add_stages(DefaultStages::new( factory.clone(), tip_rx, @@ -163,11 +169,11 @@ impl EnvironmentArgs { /// Environment built from [`EnvironmentArgs`]. #[derive(Debug)] -pub struct Environment { +pub struct Environment { /// Configuration for reth node pub config: Config, /// Provider factory. - pub provider_factory: ProviderFactory>, + pub provider_factory: ProviderFactory>>, /// Datadir path. pub data_dir: ChainPath, } diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index abc183da4e..7aeed6dfe1 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,9 +1,11 @@ use crate::db::get::{maybe_json_value_parser, table_key}; use ahash::RandomState; use clap::Parser; +use reth_chainspec::ChainSpec; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; -use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, @@ -33,7 +35,10 @@ pub struct Command { impl Command { /// Execute `db checksum` command - pub fn execute(self, tool: &DbTool>) -> eyre::Result<()> { + pub fn execute>( + self, + tool: &DbTool>>, + ) -> eyre::Result<()> { warn!("This command should be run without the node running!"); self.table.view(&ChecksumViewer { tool, @@ -45,20 +50,22 @@ impl Command { } } -pub(crate) struct ChecksumViewer<'a, DB: Database> { - tool: &'a DbTool, +pub(crate) struct ChecksumViewer<'a, N: NodeTypesWithDB> { + tool: &'a DbTool, start_key: Option, end_key: Option, limit: Option, } -impl ChecksumViewer<'_, DB> { - pub(crate) const fn new(tool: &'_ DbTool) -> ChecksumViewer<'_, DB> { +impl ChecksumViewer<'_, N> { + pub(crate) const fn new(tool: &'_ DbTool) -> ChecksumViewer<'_, N> { ChecksumViewer { tool, start_key: None, end_key: None, limit: None } } } -impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, DB> { +impl> TableViewer<(u64, Duration)> + for ChecksumViewer<'_, N> +{ type Error = eyre::Report; fn view(&self) -> Result<(u64, Duration), Self::Error> { diff --git a/crates/cli/commands/src/db/clear.rs b/crates/cli/commands/src/db/clear.rs index b9edf458d3..de92c7dcac 100644 --- a/crates/cli/commands/src/db/clear.rs +++ b/crates/cli/commands/src/db/clear.rs @@ -5,6 +5,7 @@ use reth_db_api::{ table::Table, transaction::{DbTx, DbTxMut}, }; +use 
reth_node_builder::NodeTypesWithDB; use reth_provider::{ProviderFactory, StaticFileProviderFactory}; use reth_static_file_types::{find_fixed_range, StaticFileSegment}; @@ -17,7 +18,10 @@ pub struct Command { impl Command { /// Execute `db clear` command - pub fn execute(self, provider_factory: ProviderFactory) -> eyre::Result<()> { + pub fn execute( + self, + provider_factory: ProviderFactory, + ) -> eyre::Result<()> { match self.subcommand { Subcommands::Mdbx { table } => { table.view(&ClearViewer { db: provider_factory.db_ref() })? diff --git a/crates/cli/commands/src/db/diff.rs b/crates/cli/commands/src/db/diff.rs index e025c4648c..0b7b779073 100644 --- a/crates/cli/commands/src/db/diff.rs +++ b/crates/cli/commands/src/db/diff.rs @@ -2,6 +2,7 @@ use clap::Parser; use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv, Tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; use reth_db_common::DbTool; +use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ args::DatabaseArgs, dirs::{DataDirPath, PlatformPath}, @@ -51,7 +52,10 @@ impl Command { /// /// The discrepancies and extra elements, along with a brief summary of the diff results are /// then written to a file in the output directory. - pub fn execute(self, tool: &DbTool>) -> eyre::Result<()> { + pub fn execute( + self, + tool: &DbTool>>, + ) -> eyre::Result<()> { warn!("Make sure the node is not running when running `reth db diff`!"); // open second db let second_db_path: PathBuf = self.secondary_datadir.join("db").into(); diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 9b5b011d3b..b5ab9168fa 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,16 +1,16 @@ +use alloy_primitives::BlockHash; use clap::Parser; +use reth_chainspec::ChainSpec; use reth_db::{ static_file::{ ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, SidecarMask, TransactionMask, }, tables, RawKey, RawTable, Receipts, Sidecars, TableViewer, Transactions, }; -use reth_db_api::{ - database::Database, - table::{Decompress, DupSort, Table}, -}; +use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; -use reth_primitives::{BlobSidecars, BlockHash, Header}; +use reth_node_builder::NodeTypesWithDB; +use reth_primitives::{BlobSidecars, Header}; use reth_provider::StaticFileProviderFactory; use reth_static_file_types::StaticFileSegment; use tracing::error; @@ -56,7 +56,10 @@ enum Subcommand { impl Command { /// Execute `db get` command - pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + pub fn execute>( + self, + tool: &DbTool, + ) -> eyre::Result<()> { match self.subcommand { Subcommand::Mdbx { table, key, subkey, raw } => { table.view(&GetValueViewer { tool, key, subkey, raw })? 
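Throughout these `db` subcommands, functions that were generic over `DB: Database` are re-bounded on `N: NodeTypesWithDB`, with callers supplying a `NodeTypesWithDBAdapter` pairing an engine-level node type with a concrete database such as `Arc<DatabaseEnv>`. A rough sketch of the adapter idea follows, with heavily simplified stand-in traits (reth's actual trait tree carries engine types, primitives, and more):

```rust
use std::{marker::PhantomData, sync::Arc};

// Simplified stand-ins for reth's node-type traits.
trait Database {}
trait NodeTypes {
    type ChainSpec;
}
trait NodeTypesWithDB: NodeTypes {
    type DB: Database;
}

/// Pairs a plain `NodeTypes` with a concrete database type, mirroring the
/// `NodeTypesWithDBAdapter<N, DB>` used in the hunks above.
struct NodeTypesWithDBAdapter<N, DB>(PhantomData<(N, DB)>);

impl<N: NodeTypes, DB> NodeTypes for NodeTypesWithDBAdapter<N, DB> {
    type ChainSpec = N::ChainSpec;
}
impl<N: NodeTypes, DB: Database> NodeTypesWithDB for NodeTypesWithDBAdapter<N, DB> {
    type DB = DB;
}

// Example concrete types.
struct EthNode;
impl NodeTypes for EthNode {
    type ChainSpec = String;
}
struct Mdbx;
impl Database for Mdbx {}

/// Helpers can now bound on the combined trait instead of a bare `DB`.
fn open_tool<N: NodeTypesWithDB>(_db: Arc<N::DB>) {}

fn main() {
    open_tool::<NodeTypesWithDBAdapter<EthNode, Mdbx>>(Arc::new(Mdbx));
}
```

One type parameter then carries both the chain/engine configuration and the storage backend, which is why the viewers above shrink from two generics to one.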
@@ -150,14 +153,14 @@ fn table_subkey(subkey: &Option) -> Result { - tool: &'a DbTool, +struct GetValueViewer<'a, N: NodeTypesWithDB> { + tool: &'a DbTool, key: String, subkey: Option, raw: bool, } -impl TableViewer<()> for GetValueViewer<'_, DB> { +impl> TableViewer<()> for GetValueViewer<'_, N> { type Error = eyre::Report; fn view(&self) -> Result<(), Self::Error> { @@ -214,10 +217,10 @@ pub(crate) fn maybe_json_value_parser(value: &str) -> Result>) -> eyre::Result<()> { + pub fn execute>( + self, + tool: &DbTool>>, + ) -> eyre::Result<()> { self.table.view(&ListTableViewer { tool, args: &self }) } @@ -81,12 +86,12 @@ impl Command { } } -struct ListTableViewer<'a> { - tool: &'a DbTool>, +struct ListTableViewer<'a, N: NodeTypesWithEngine> { + tool: &'a DbTool>>, args: &'a Command, } -impl TableViewer<()> for ListTableViewer<'_> { +impl TableViewer<()> for ListTableViewer<'_, N> { type Error = eyre::Report; fn view(&self) -> Result<(), Self::Error> { diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index de1f1cc382..6d48256101 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,7 +1,10 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; +use reth_node_builder::NodeTypesWithEngine; use std::io::{self, Write}; mod checksum; @@ -15,9 +18,9 @@ mod tui; /// `reth db` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, #[command(subcommand)] command: Subcommands, @@ -52,17 +55,19 @@ pub enum Subcommands { /// `db_ro_exec` opens a database in read-only mode, and then execute with the provided command macro_rules! db_ro_exec { - ($env:expr, $tool:ident, $command:block) => { - let Environment { provider_factory, .. } = $env.init(AccessRights::RO)?; + ($env:expr, $tool:ident, $N:ident, $command:block) => { + let Environment { provider_factory, .. } = $env.init::<$N>(AccessRights::RO)?; let $tool = DbTool::new(provider_factory.clone())?; $command; }; } -impl Command { +impl> Command { /// Execute `db` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ) -> eyre::Result<()> { let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); @@ -80,27 +85,27 @@ impl Command { match self.command { // TODO: We'll need to add this on the DB trait. Subcommands::Stats(command) => { - db_ro_exec!(self.env, tool, { + db_ro_exec!(self.env, tool, N, { command.execute(data_dir, &tool)?; }); } Subcommands::List(command) => { - db_ro_exec!(self.env, tool, { + db_ro_exec!(self.env, tool, N, { command.execute(&tool)?; }); } Subcommands::Checksum(command) => { - db_ro_exec!(self.env, tool, { + db_ro_exec!(self.env, tool, N, { command.execute(&tool)?; }); } Subcommands::Diff(command) => { - db_ro_exec!(self.env, tool, { + db_ro_exec!(self.env, tool, N, { command.execute(&tool)?; }); } Subcommands::Get(command) => { - db_ro_exec!(self.env, tool, { + db_ro_exec!(self.env, tool, N, { command.execute(&tool)?; }); } @@ -120,12 +125,12 @@ impl Command { } } - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; let tool = DbTool::new(provider_factory)?; tool.drop(db_path, static_files_path)?; } Subcommands::Clear(command) => { - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; command.execute(provider_factory)?; } Subcommands::Version => { @@ -155,13 +160,19 @@ impl Command { #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::SUPPORTED_CHAINS; + use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; use std::path::Path; #[test] fn parse_stats_globals() { let path = format!("../{}", SUPPORTED_CHAINS[0]); - let cmd = Command::try_parse_from(["reth", "--datadir", &path, "stats"]).unwrap(); + let cmd = Command::::try_parse_from([ + "reth", + "--datadir", + &path, + "stats", + ]) + .unwrap(); assert_eq!(cmd.env.datadir.resolve_datadir(cmd.env.chain.chain).as_ref(), Path::new(&path)); } } diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 37f7d617ba..051b4469ab 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -4,10 +4,12 @@ use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; use human_bytes::human_bytes; use itertools::Itertools; +use reth_chainspec::ChainSpec; use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables}; use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::providers::StaticFileProvider; use reth_static_file_types::{find_fixed_range, SegmentRangeInclusive}; @@ -36,10 +38,10 @@ pub struct Command { impl Command { /// Execute `db stats` command - pub fn execute( + pub fn execute>( self, data_dir: ChainPath, - tool: &DbTool>, + tool: &DbTool>>, ) -> eyre::Result<()> { if self.checksum { let checksum_report = self.checksum_report(tool)?; @@ -58,7 +60,10 @@ impl Command { Ok(()) } - fn db_stats_table(&self, tool: &DbTool>) -> eyre::Result { + fn db_stats_table>>( + &self, + tool: &DbTool, + ) -> eyre::Result { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); table.set_header([ @@ -168,7 +173,7 @@ impl Command { } let static_files = iter_static_files(data_dir.static_files())?; - let static_file_provider = StaticFileProvider::read_only(data_dir.static_files())?; + let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?; let mut total_data_size = 0; let mut total_index_size = 0; @@ -244,6 +249,12 @@ impl Command { total_index_size += index_size; total_offsets_size += offsets_size; total_config_size += config_size; + + // Manually drop provider, otherwise removal from cache will deadlock. 
+ drop(jar_provider); + + // Removes from cache, since if we have many files, it may hit ulimit limits + static_file_provider.remove_cached_provider(segment, fixed_block_range.end()); } if !self.detailed_segments { @@ -306,7 +317,10 @@ impl Command { Ok(table) } - fn checksum_report(&self, tool: &DbTool>) -> eyre::Result { + fn checksum_report>( + &self, + tool: &DbTool, + ) -> eyre::Result { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); table.set_header(vec![Cell::new("Table"), Cell::new("Checksum"), Cell::new("Elapsed")]); diff --git a/crates/cli/commands/src/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs index ae425ca8c2..30d3bc9651 100644 --- a/crates/cli/commands/src/dump_genesis.rs +++ b/crates/cli/commands/src/dump_genesis.rs @@ -1,26 +1,27 @@ //! Command that dumps genesis block JSON configuration to stdout +use std::sync::Arc; + use clap::Parser; use reth_chainspec::ChainSpec; -use reth_node_core::args::utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}; -use std::sync::Arc; +use reth_cli::chainspec::ChainSpecParser; /// Dumps genesis block JSON configuration to stdout #[derive(Debug, Parser)] -pub struct DumpGenesisCommand { +pub struct DumpGenesisCommand { /// The chain this node is running. /// /// Possible values are either a built-in chain or the path to a chain specification file. #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = chain_value_parser + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser() )] - chain: Arc, + chain: Arc, } -impl DumpGenesisCommand { +impl> DumpGenesisCommand { /// Execute the `dump-genesis` command pub async fn execute(self) -> eyre::Result<()> { println!("{}", serde_json::to_string_pretty(self.chain.genesis())?); @@ -31,11 +32,12 @@ impl DumpGenesisCommand { #[cfg(test)] mod tests { use super::*; + use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; #[test] fn parse_dump_genesis_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: DumpGenesisCommand = + let args: DumpGenesisCommand = DumpGenesisCommand::parse_from(["reth", "--chain", chain]); assert_eq!( Ok(args.chain.chain), diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index bef16e4741..ddad4aac1d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,13 +1,15 @@ //! Command that initializes the node by importing a chain from a file. 
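The `db stats` hunk above is careful about ordering: it drops `jar_provider` before calling `remove_cached_provider`, and it evicts at all so that iterating many static-file segments does not exhaust the open-file limit (ulimit). Here is a toy model of why the drop must come first, assuming a lock-guarded cache similar in spirit to the provider's internal one:

```rust
use std::collections::HashMap;
use std::sync::{RwLock, RwLockReadGuard};

/// Toy stand-in for the static-file provider's jar cache.
struct JarCache {
    jars: RwLock<HashMap<u64, String>>, // segment offset -> open "jar"
}

impl JarCache {
    /// The returned guard keeps the cache read-locked while the jar is in use.
    fn get(&self, key: u64) -> Option<RwLockReadGuard<'_, HashMap<u64, String>>> {
        let guard = self.jars.read().unwrap();
        guard.contains_key(&key).then_some(guard)
    }

    /// Needs the write lock, so it can block forever if the same thread
    /// still holds a read guard returned by `get`.
    fn remove(&self, key: u64) {
        self.jars.write().unwrap().remove(&key);
    }
}

fn main() {
    let cache = JarCache { jars: RwLock::new(HashMap::from([(0, "jar-0".to_string())])) };
    let jar = cache.get(0);
    // Mirrors the hunk: release the handle first, then evict from the cache.
    // Calling `remove` while `jar` is alive may deadlock.
    drop(jar);
    cache.remove(0);
}
```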
use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; use reth_consensus::Consensus; use reth_db::tables; -use reth_db_api::{database::Database, transaction::DbTx}; +use reth_db_api::transaction::DbTx; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, @@ -18,9 +20,9 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; -use reth_primitives::B256; use reth_provider::{ BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, ProviderFactory, StageCheckpointReader, @@ -34,9 +36,9 @@ use tracing::{debug, error, info}; /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] -pub struct ImportCommand { +pub struct ImportCommand { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, /// Disables stages that require state. #[arg(long, verbatim_doc_comment)] @@ -54,12 +56,13 @@ pub struct ImportCommand { path: PathBuf, } -impl ImportCommand { +impl> ImportCommand { /// Execute `import` command - pub async fn execute(self, executor: F) -> eyre::Result<()> + pub async fn execute(self, executor: F) -> eyre::Result<()> where + N: NodeTypesWithEngine, E: BlockExecutorProvider, - F: FnOnce(Arc) -> E, + F: FnOnce(Arc) -> E, { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); @@ -72,7 +75,7 @@ impl ImportCommand { "Chunking chain import" ); - let Environment { provider_factory, config, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; let executor = executor(provider_factory.chain_spec()); let consensus = Arc::new(EthBeaconConsensus::new(self.env.chain.clone())); @@ -157,18 +160,18 @@ impl ImportCommand { /// If configured to execute, all stages will run. Otherwise, only stages that don't require state /// will run. 
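In the `import` changes above, `execute` takes an executor factory closure so the caller builds a concrete `BlockExecutorProvider` from the resolved chain spec instead of the command hard-wiring one. A reduced sketch of that injection pattern, using stand-in types:

```rust
use std::sync::Arc;

// Stand-ins for the chain spec and executor types the import command uses.
struct ChainSpec {
    chain: &'static str,
}
trait BlockExecutorProvider {
    fn name(&self) -> String;
}

struct EthExecutor {
    spec: Arc<ChainSpec>,
}
impl BlockExecutorProvider for EthExecutor {
    fn name(&self) -> String {
        format!("eth-executor({})", self.spec.chain)
    }
}

/// Mirrors the `execute(self, executor: F)` shape: the command resolves the
/// chain spec, then lets the caller decide which executor to build from it.
fn run_import<E, F>(spec: Arc<ChainSpec>, executor: F)
where
    E: BlockExecutorProvider,
    F: FnOnce(Arc<ChainSpec>) -> E,
{
    let executor = executor(spec);
    println!("importing with {}", executor.name());
}

fn main() {
    run_import(Arc::new(ChainSpec { chain: "mainnet" }), |spec| EthExecutor { spec });
}
```

This is what lets one `ImportCommand` serve multiple node flavors: each binary passes its own factory.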
#[allow(clippy::too_many_arguments)] -pub fn build_import_pipeline( +pub fn build_import_pipeline( config: &Config, - provider_factory: ProviderFactory, + provider_factory: ProviderFactory, consensus: &Arc, file_client: Arc, - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer>, disable_exec: bool, executor: E, skip_state_root_validation: bool, -) -> eyre::Result<(Pipeline, impl Stream)> +) -> eyre::Result<(Pipeline, impl Stream)> where - DB: Database + Clone + Unpin + 'static, + N: NodeTypesWithDB, C: Consensus + 'static, E: BlockExecutorProvider, { @@ -203,7 +206,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let pipeline = Pipeline::builder() + let pipeline = Pipeline::::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) @@ -232,12 +235,13 @@ where #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::SUPPORTED_CHAINS; + use reth_node_core::args::utils::{DefaultChainSpecParser, SUPPORTED_CHAINS}; #[test] fn parse_common_import_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: ImportCommand = ImportCommand::parse_from(["reth", "--chain", chain, "."]); + let args: ImportCommand = + ImportCommand::parse_from(["reth", "--chain", chain, "."]); assert_eq!( Ok(args.env.chain.chain), chain.parse::(), diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 933527cc56..63a8827eb2 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -2,22 +2,27 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; +use reth_node_builder::NodeTypesWithEngine; use reth_provider::BlockHashReader; use tracing::info; /// Initializes the database with the genesis block. #[derive(Debug, Parser)] -pub struct InitCommand { +pub struct InitCommand { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, } -impl InitCommand { +impl> InitCommand { /// Execute the `init` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ) -> eyre::Result<()> { info!(target: "reth::cli", "reth init starting"); - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; let hash = provider_factory .block_hash(0)? diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs index af26d15e01..67955d714a 100644 --- a/crates/cli/commands/src/init_state.rs +++ b/crates/cli/commands/src/init_state.rs @@ -1,11 +1,13 @@ //! Command that initializes the node from a genesis file. use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_primitives::B256; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_config::config::EtlConfig; -use reth_db_api::database::Database; use reth_db_common::init::init_from_state_dump; -use reth_primitives::B256; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_provider::ProviderFactory; use std::{fs::File, io::BufReader, path::PathBuf}; @@ -13,9 +15,9 @@ use tracing::info; /// Initializes the database with the genesis block. 
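`build_import_pipeline` is likewise re-bounded from `DB: Database + Clone + Unpin + 'static` to a node-types parameter, and the builder call gains a turbofish (`Pipeline::<N>::builder()`) because none of the builder's arguments mention `N`, so inference cannot pin it. A stripped-down sketch of that shape:

```rust
use std::marker::PhantomData;

trait NodeTypesWithDB {}
struct MainnetNode;
impl NodeTypesWithDB for MainnetNode {}

struct Pipeline<N>(PhantomData<N>);
struct PipelineBuilder<N>(PhantomData<N>);

impl<N: NodeTypesWithDB> Pipeline<N> {
    /// Nothing downstream names `N`, so callers must pin it explicitly,
    /// as in the `Pipeline::<N>::builder()` calls in the hunks above.
    fn builder() -> PipelineBuilder<N> {
        PipelineBuilder(PhantomData)
    }
}

impl<N: NodeTypesWithDB> PipelineBuilder<N> {
    fn build(self) -> Pipeline<N> {
        Pipeline(PhantomData)
    }
}

fn main() {
    // Without the turbofish the node-type parameter would be ambiguous here.
    let _pipeline = Pipeline::<MainnetNode>::builder().build();
}
```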
#[derive(Debug, Parser)] -pub struct InitStateCommand { +pub struct InitStateCommand { #[command(flatten)] - env: EnvironmentArgs, + pub env: EnvironmentArgs, /// JSONL file with state dump. /// @@ -35,15 +37,17 @@ pub struct InitStateCommand { /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until /// and including the non-genesis block to init chain at. See 'import' command. #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] - state: PathBuf, + pub state: PathBuf, } -impl InitStateCommand { +impl> InitStateCommand { /// Execute the `init` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ) -> eyre::Result<()> { info!(target: "reth::cli", "Reth init-state starting"); - let Environment { config, provider_factory, .. } = self.env.init(AccessRights::RW)?; + let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; info!(target: "reth::cli", "Initiating state dump"); @@ -55,9 +59,9 @@ impl InitStateCommand { } /// Initialize chain with state at specific block, from a file with state dump. -pub fn init_at_state( +pub fn init_at_state>( state_dump_path: PathBuf, - factory: ProviderFactory, + factory: ProviderFactory, etl_config: EtlConfig, ) -> eyre::Result { info!(target: "reth::cli", @@ -67,5 +71,9 @@ pub fn init_at_state( let file = File::open(state_dump_path)?; let reader = BufReader::new(file); - init_from_state_dump(reader, factory, etl_config) + let provider_rw = factory.provider_rw()?; + let hash = init_from_state_dump(reader, &provider_rw.0, etl_config)?; + provider_rw.commit()?; + + Ok(hash) } diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 54496f1ad8..66a67bdfcf 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -2,15 +2,15 @@ use clap::{value_parser, Args, Parser}; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::parse_socket_address; use reth_db::{init_db, DatabaseEnv}; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::{ - utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, - PerformanceOptimizationArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + utils::DefaultChainSpecParser, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, + PayloadBuilderArgs, PerformanceOptimizationArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, @@ -20,7 +20,10 @@ use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sy /// Start the node #[derive(Debug, Parser)] -pub struct NodeCommand { +pub struct NodeCommand< + C: ChainSpecParser = DefaultChainSpecParser, + Ext: clap::Args + fmt::Debug = NoArgs, +> { /// The path to the configuration file to use. #[arg(long, value_name = "FILE", verbatim_doc_comment)] pub config: Option, @@ -31,13 +34,13 @@ pub struct NodeCommand { #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], default_value_if("dev", "true", "dev"), - value_parser = chain_value_parser, + value_parser = C::parser(), required = false, )] - pub chain: Arc, + pub chain: Arc, /// Enable Prometheus metrics. 
/// @@ -117,7 +120,7 @@ pub struct NodeCommand { pub performance_optimization: PerformanceOptimizationArgs, } -impl NodeCommand { +impl> NodeCommand { /// Parsers only the default CLI arguments pub fn parse_args() -> Self { Self::parse() @@ -133,7 +136,7 @@ impl NodeCommand { } } -impl NodeCommand { +impl, Ext: clap::Args + fmt::Debug> NodeCommand { /// Launches the node /// /// This transforms the node command into a node config and launches the node using the given @@ -214,6 +217,7 @@ pub struct NoArgs; mod tests { use super::*; use reth_discv4::DEFAULT_DISCOVERY_PORT; + use reth_node_core::args::utils::SUPPORTED_CHAINS; use std::{ net::{IpAddr, Ipv4Addr}, path::Path, @@ -221,28 +225,29 @@ mod tests { #[test] fn parse_help_node_command() { - let err = NodeCommand::try_parse_args_from(["reth", "--help"]).unwrap_err(); + let err = NodeCommand::::try_parse_args_from(["reth", "--help"]) + .unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp); } #[test] fn parse_common_node_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: NodeCommand = NodeCommand::::parse_from(["reth", "--chain", chain]); + let args: NodeCommand = NodeCommand::parse_from(["reth", "--chain", chain]); assert_eq!(args.chain.chain, chain.parse::().unwrap()); } } #[test] fn parse_discovery_addr() { - let cmd = + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap(); assert_eq!(cmd.network.discovery.addr, IpAddr::V4(Ipv4Addr::LOCALHOST)); } #[test] fn parse_addr() { - let cmd = NodeCommand::try_parse_args_from([ + let cmd: NodeCommand = NodeCommand::try_parse_args_from([ "reth", "--discovery.addr", "127.0.0.1", @@ -256,13 +261,14 @@ mod tests { #[test] fn parse_discovery_port() { - let cmd = NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300"]).unwrap(); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300"]).unwrap(); assert_eq!(cmd.network.discovery.port, 300); } #[test] fn parse_port() { - let cmd = + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300", "--port", "99"]) .unwrap(); assert_eq!(cmd.network.discovery.port, 300); @@ -271,27 +277,29 @@ mod tests { #[test] fn parse_metrics_port() { - let cmd = NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap(); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap(); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--metrics", "localhost:9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); } #[test] fn parse_config_path() { - let cmd = + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); let config_path = cmd.config.unwrap_or_else(|| data_dir.config()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); - let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); + let cmd: NodeCommand = 
NodeCommand::try_parse_args_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); @@ -302,14 +310,14 @@ mod tests { #[test] fn parse_db_path() { - let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth"]).unwrap(); let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); let db_path = data_dir.db(); let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]); assert!(db_path.ends_with(end), "{:?}", cmd.config); - let cmd = + let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); @@ -319,7 +327,7 @@ mod tests { #[test] fn parse_dev() { - let cmd = NodeCommand::::parse_from(["reth", "--dev"]); + let cmd: NodeCommand = NodeCommand::parse_from(["reth", "--dev"]); let chain = reth_chainspec::DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); @@ -337,7 +345,7 @@ mod tests { #[test] fn parse_instance() { - let mut cmd = NodeCommand::::parse_from(["reth"]); + let mut cmd: NodeCommand = NodeCommand::parse_from(["reth"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1; // check rpc port numbers @@ -347,7 +355,7 @@ mod tests { // check network listening port number assert_eq!(cmd.network.port, 30303); - let mut cmd = NodeCommand::::parse_from(["reth", "--instance", "2"]); + let mut cmd: NodeCommand = NodeCommand::parse_from(["reth", "--instance", "2"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1; // check rpc port numbers @@ -357,7 +365,7 @@ mod tests { // check network listening port number assert_eq!(cmd.network.port, 30304); - let mut cmd = NodeCommand::::parse_from(["reth", "--instance", "3"]); + let mut cmd: NodeCommand = NodeCommand::parse_from(["reth", "--instance", "3"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1; // check rpc port numbers @@ -370,21 +378,25 @@ mod tests { #[test] fn parse_with_unused_ports() { - let cmd = NodeCommand::::parse_from(["reth", "--with-unused-ports"]); + let cmd: NodeCommand = NodeCommand::parse_from(["reth", "--with-unused-ports"]); assert!(cmd.with_unused_ports); } #[test] fn with_unused_ports_conflicts_with_instance() { - let err = - NodeCommand::try_parse_args_from(["reth", "--with-unused-ports", "--instance", "2"]) - .unwrap_err(); + let err = NodeCommand::::try_parse_args_from([ + "reth", + "--with-unused-ports", + "--instance", + "2", + ]) + .unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::ArgumentConflict); } #[test] fn with_unused_ports_check_zero() { - let mut cmd = NodeCommand::::parse_from(["reth"]); + let mut cmd: NodeCommand = NodeCommand::parse_from(["reth"]); cmd.rpc = cmd.rpc.with_unused_ports(); cmd.network = cmd.network.with_unused_ports(); diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 452df736d7..5989e9f2a5 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -2,27 +2,25 @@ use std::{path::PathBuf, sync::Arc}; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use 
reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; use reth_network::NetworkConfigBuilder; use reth_network_p2p::bodies::client::BodiesClient; use reth_node_core::{ - args::{ - utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DatadirArgs, NetworkArgs, - }, + args::{DatabaseArgs, DatadirArgs, NetworkArgs}, utils::get_single_header, }; -use reth_primitives::BlockHashOrNumber; mod rlpx; /// `reth p2p` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { /// The path to the configuration file to use. #[arg(long, value_name = "FILE", verbatim_doc_comment)] config: Option, @@ -33,11 +31,11 @@ pub struct Command { #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = chain_value_parser + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser() )] - chain: Arc, + chain: Arc, /// The number of retries per request #[arg(long, default_value = "5")] @@ -75,7 +73,7 @@ pub enum Subcommands { Rlpx(rlpx::Command), } -impl Command { +impl> Command { /// Execute `p2p` command pub async fn execute(self) -> eyre::Result<()> { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index a8d33d8d43..d19247e21a 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,21 +1,26 @@ //! Command that runs pruning without any limits. use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; +use reth_node_builder::NodeTypesWithEngine; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use tracing::info; /// Prunes according to the configuration without any limits #[derive(Debug, Parser)] -pub struct PruneCommand { +pub struct PruneCommand { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, } -impl PruneCommand { +impl> PruneCommand { /// Execute the `prune` command - pub async fn execute(self) -> eyre::Result<()> { - let Environment { config, provider_factory, .. } = self.env.init(AccessRights::RW)?; + pub async fn execute>( + self, + ) -> eyre::Result<()> { + let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; let prune_config = config.prune.unwrap_or_default(); // Copy data from database to static files diff --git a/crates/cli/commands/src/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs index d082f4e073..9bf8181745 100644 --- a/crates/cli/commands/src/recover/mod.rs +++ b/crates/cli/commands/src/recover/mod.rs @@ -1,29 +1,35 @@ //! `reth recover` command. use clap::{Parser, Subcommand}; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; +use reth_node_builder::NodeTypesWithEngine; mod storage_tries; /// `reth recover` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(subcommand)] - command: Subcommands, + command: Subcommands, } /// `reth recover` subcommands #[derive(Subcommand, Debug)] -pub enum Subcommands { +pub enum Subcommands { /// Recover the node by deleting dangling storage tries. 
- StorageTries(storage_tries::Command), + StorageTries(storage_tries::Command), } -impl Command { +impl> Command { /// Execute `recover` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { match self.command { - Subcommands::StorageTries(command) => command.execute(ctx).await, + Subcommands::StorageTries(command) => command.execute::(ctx).await, } } } diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 5c1ae7bfca..65cb741f32 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,11 +1,14 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, transaction::DbTx, }; +use reth_node_builder::NodeTypesWithEngine; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -13,15 +16,18 @@ use tracing::*; /// `reth recover storage-tries` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, } -impl Command { +impl> Command { /// Execute `storage-tries` recovery command - pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + pub async fn execute>( + self, + _ctx: CliContext, + ) -> eyre::Result<()> { + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; let mut provider = provider_factory.provider_rw()?; let best_block = provider.best_block_number()?; diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 24274c61a4..41f8459a00 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -2,12 +2,15 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_db::{static_file::iter_static_files, tables}; use reth_db_api::transaction::DbTxMut; use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, DbTool, }; +use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory}; use reth_stages::StageId; @@ -15,17 +18,19 @@ use reth_static_file_types::{find_fixed_range, StaticFileSegment}; /// `reth drop-stage` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, stage: StageEnum, } -impl Command { +impl> Command { /// Execute `db` command - pub async fn execute(self) -> eyre::Result<()> { - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + pub async fn execute>( + self, + ) -> eyre::Result<()> { + let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; let static_file_provider = provider_factory.static_file_provider(); @@ -68,7 +73,7 @@ impl Command { StageId::Headers.to_string(), Default::default(), )?; - insert_genesis_header(&provider_rw, &static_file_provider, self.env.chain)?; + insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } StageEnum::Bodies => { tx.clear::()?; @@ -82,7 +87,7 @@ impl Command { StageId::Bodies.to_string(), Default::default(), )?; - insert_genesis_header(&provider_rw, &static_file_provider, self.env.chain)?; + insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } StageEnum::Senders => { tx.clear::()?; @@ -104,7 +109,7 @@ impl Command { Default::default(), )?; let alloc = &self.env.chain.genesis().alloc; - insert_genesis_state(&provider_rw, alloc.len(), alloc.iter())?; + insert_genesis_state(&provider_rw.0, alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; @@ -162,7 +167,7 @@ impl Command { StageId::IndexStorageHistory.to_string(), Default::default(), )?; - insert_genesis_history(&provider_rw, self.env.chain.genesis.alloc.iter())?; + insert_genesis_history(&provider_rw.0, self.env.chain.genesis.alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; @@ -170,7 +175,7 @@ impl Command { StageId::TransactionLookup.to_string(), Default::default(), )?; - insert_genesis_header(&provider_rw, &static_file_provider, self.env.chain)?; + insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; } } diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 61fc5e41ce..dfc320f15d 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -1,17 +1,21 @@ +use std::sync::Arc; + use super::setup; +use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx, }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_execution_stage( - db_tool: &DbTool, +pub(crate) async fn dump_execution_stage( + db_tool: &DbTool, from: u64, to: u64, output_datadir: ChainPath, @@ -19,7 +23,7 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - DB: Database, + N: NodeTypesWithDB, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -30,8 +34,8 @@ where if should_run { dry_run( - ProviderFactory::new( - output_db, + ProviderFactory::>>::new( + Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, ), @@ -45,9 +49,9 @@ where } /// Imports all the tables that can be copied over a range. -fn import_tables_with_range( +fn import_tables_with_range( output_db: &DatabaseEnv, - db_tool: &DbTool, + db_tool: &DbTool, from: u64, to: u64, ) -> eyre::Result<()> { @@ -125,8 +129,8 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. 
There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy( - db_tool: &DbTool, +fn unwind_and_copy>( + db_tool: &DbTool, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, @@ -155,14 +159,14 @@ fn unwind_and_copy( } /// Try to re-execute the stage without committing -fn dry_run( - output_provider_factory: ProviderFactory, +fn dry_run( + output_provider_factory: ProviderFactory, to: u64, from: u64, executor: E, ) -> eyre::Result<()> where - DB: Database, + N: NodeTypesWithDB, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. [dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 025899231d..1d96de778d 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -1,16 +1,20 @@ +use std::sync::Arc; + use super::setup; +use alloy_primitives::BlockNumber; use eyre::Result; +use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_account_stage( - db_tool: &DbTool, +pub(crate) async fn dump_hashing_account_stage>( + db_tool: &DbTool, from: BlockNumber, to: BlockNumber, output_datadir: ChainPath, @@ -31,8 +35,8 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, + ProviderFactory::>>::new( + Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, ), @@ -45,8 +49,8 @@ pub(crate) async fn dump_hashing_account_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
-fn unwind_and_copy( - db_tool: &DbTool, +fn unwind_and_copy>( + db_tool: &DbTool, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, @@ -70,8 +74,8 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, +fn dry_run>( + output_provider_factory: ProviderFactory, to: u64, from: u64, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index ad62988874..57f0ed53ac 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -1,15 +1,19 @@ +use std::sync::Arc; + use super::setup; use eyre::Result; +use reth_chainspec::ChainSpec; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_storage_stage( - db_tool: &DbTool, +pub(crate) async fn dump_hashing_storage_stage>( + db_tool: &DbTool, from: u64, to: u64, output_datadir: ChainPath, @@ -21,8 +25,8 @@ pub(crate) async fn dump_hashing_storage_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, + ProviderFactory::>>::new( + Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, ), @@ -35,8 +39,8 @@ pub(crate) async fn dump_hashing_storage_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
-fn unwind_and_copy( - db_tool: &DbTool, +fn unwind_and_copy>( + db_tool: &DbTool, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, @@ -65,8 +69,8 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, +fn dry_run>( + output_provider_factory: ProviderFactory, to: u64, from: u64, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 2d13c15153..bcd05ca947 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -1,13 +1,17 @@ +use std::sync::Arc; + use super::setup; +use alloy_primitives::BlockNumber; use eyre::Result; +use reth_chainspec::ChainSpec; use reth_config::config::EtlConfig; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_prune::PruneModes; use reth_stages::{ @@ -19,8 +23,8 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage( - db_tool: &DbTool, +pub(crate) async fn dump_merkle_stage>( + db_tool: &DbTool, from: BlockNumber, to: BlockNumber, output_datadir: ChainPath, @@ -48,8 +52,8 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, + ProviderFactory::>>::new( + Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, ), @@ -62,8 +66,8 @@ pub(crate) async fn dump_merkle_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -fn unwind_and_copy( - db_tool: &DbTool, +fn unwind_and_copy>( + db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, output_db: &DatabaseEnv, @@ -140,8 +144,8 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, +fn dry_run>( + output_provider_factory: ProviderFactory, to: u64, from: u64, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 7366ff9981..44161d9b3b 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -2,6 +2,7 @@ use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_db::{init_db, mdbx::DatabaseArguments, tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, models::ClientVersion, table::TableImporter, @@ -9,6 +10,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::execute::BlockExecutorProvider; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::{ args::DatadirArgs, dirs::{DataDirPath, PlatformPath}, @@ -30,9 +32,9 @@ use merkle::dump_merkle_stage; /// `reth dump-stage` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, #[command(subcommand)] command: Stages, @@ -84,14 +86,15 @@ macro_rules! 
handle_stage { }}; } -impl Command { +impl> Command { /// Execute `dump-stage` command - pub async fn execute(self, executor: F) -> eyre::Result<()> + pub async fn execute(self, executor: F) -> eyre::Result<()> where + N: NodeTypesWithEngine, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { - let Environment { provider_factory, .. } = self.env.init(AccessRights::RO)?; + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; let tool = DbTool::new(provider_factory)?; match &self.command { @@ -110,11 +113,11 @@ impl Command { /// Sets up the database and initial state on [`tables::BlockBodyIndices`]. Also returns the tip /// block number. -pub(crate) fn setup( +pub(crate) fn setup( from: u64, to: u64, output_db: &PathBuf, - db_tool: &DbTool, + db_tool: &DbTool, ) -> eyre::Result<(DatabaseEnv, u64)> { assert!(from < to, "FROM block should be bigger than TO block."); diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index e0365c879d..a4e0d088ac 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -4,8 +4,10 @@ use std::sync::Arc; use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_evm::execute::BlockExecutorProvider; +use reth_node_builder::NodeTypesWithEngine; pub mod drop; pub mod dump; @@ -14,41 +16,42 @@ pub mod unwind; /// `reth stage` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(subcommand)] - command: Subcommands, + command: Subcommands, } /// `reth stage` subcommands #[derive(Subcommand, Debug)] -pub enum Subcommands { +pub enum Subcommands { /// Run a single stage. /// /// Note that this won't use the Pipeline and as a result runs stages /// assuming that all the data can be held in memory. It is not recommended /// to run a stage for really large block ranges if your computer does not have /// a lot of memory to store all the data. - Run(run::Command), + Run(run::Command), /// Drop a stage's tables from the database. - Drop(drop::Command), + Drop(drop::Command), /// Dumps a stage from a range into a new database. - Dump(dump::Command), + Dump(dump::Command), /// Unwinds a certain block range, deleting it from the database. - Unwind(unwind::Command), + Unwind(unwind::Command), } -impl Command { +impl> Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> + pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where + N: NodeTypesWithEngine, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { match self.command { - Subcommands::Run(command) => command.execute(ctx, executor).await, - Subcommands::Drop(command) => command.execute().await, - Subcommands::Dump(command) => command.execute(executor).await, - Subcommands::Unwind(command) => command.execute().await, + Subcommands::Run(command) => command.execute::(ctx, executor).await, + Subcommands::Drop(command) => command.execute::().await, + Subcommands::Dump(command) => command.execute::(executor).await, + Subcommands::Unwind(command) => command.execute::().await, } } } diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index f4d872507e..542e0a5ef5 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -3,9 +3,11 @@ //! 
Stage debugging tool use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_eips::BlockHashOrNumber; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; @@ -16,15 +18,16 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::HeadersClient; +use reth_node_builder::NodeTypesWithEngine; use reth_node_core::{ args::{NetworkArgs, StageEnum}, - primitives::BlockHashOrNumber, version::{ BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, }, }; use reth_node_metrics::{ + chain::ChainSpecInfo, hooks::Hooks, server::{MetricServer, MetricServerConfig}, version::VersionInfo, @@ -48,9 +51,9 @@ use tracing::*; /// `reth stage` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, /// Enable Prometheus metrics. /// @@ -98,10 +101,11 @@ pub struct Command { network: NetworkArgs, } -impl Command { +impl> Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> + pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where + N: NodeTypesWithEngine, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { @@ -109,7 +113,8 @@ impl Command { // Does not do anything on windows. let _ = fdlimit::raise_fd_limit(); - let Environment { provider_factory, config, data_dir } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, config, data_dir } = + self.env.init::(AccessRights::RW)?; let mut provider_rw = provider_factory.provider_rw()?; @@ -125,6 +130,7 @@ impl Command { target_triple: VERGEN_CARGO_TARGET_TRIPLE, build_profile: BUILD_PROFILE_NAME, }, + ChainSpecInfo { name: provider_factory.chain_spec().chain.to_string() }, ctx.task_executor, Hooks::new( provider_factory.db_ref().clone(), diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index c9fa658039..ce52c4a658 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,16 +1,20 @@ //! 
Unwinding a certain block range use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, B256}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; use reth_consensus::Consensus; -use reth_db_api::database::Database; +use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::args::NetworkArgs; -use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, @@ -28,9 +32,9 @@ use tracing::info; /// `reth stage unwind` command #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, #[command(flatten)] network: NetworkArgs, @@ -44,10 +48,12 @@ pub struct Command { offline: bool, } -impl Command { +impl> Command { /// Execute `db stage unwind` command - pub async fn execute(self) -> eyre::Result<()> { - let Environment { provider_factory, config, .. } = self.env.init(AccessRights::RW)?; + pub async fn execute>( + self, + ) -> eyre::Result<()> { + let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; let range = self.command.unwind_range(provider_factory.clone())?; if *range.start() == 0 { @@ -110,11 +116,11 @@ impl Command { Ok(()) } - fn build_pipeline( + fn build_pipeline>( self, config: Config, - provider_factory: ProviderFactory>, - ) -> Result>, eyre::Error> { + provider_factory: ProviderFactory, + ) -> Result, eyre::Error> { let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let stage_conf = &config.stages; @@ -126,7 +132,7 @@ impl Command { let executor = NoopBlockExecutorProvider::default(); let builder = if self.offline { - Pipeline::builder().add_stages( + Pipeline::::builder().add_stages( OfflineStages::new( executor, config.stages, @@ -137,7 +143,7 @@ impl Command { .disable(reth_stages::StageId::SenderRecovery), ) } else { - Pipeline::builder().with_tip_sender(tip_tx).add_stages( + Pipeline::::builder().with_tip_sender(tip_tx).add_stages( DefaultStages::new( provider_factory.clone(), tip_rx, @@ -189,9 +195,9 @@ impl Subcommands { /// Returns the block range to unwind. 
/// /// This returns an inclusive range: [target..=latest] - fn unwind_range( + fn unwind_range>>( &self, - factory: ProviderFactory, + factory: ProviderFactory, ) -> eyre::Result> { let provider = factory.provider()?; let last = provider.last_block_number()?; @@ -213,14 +219,28 @@ impl Subcommands { #[cfg(test)] mod tests { + use reth_node_core::args::utils::DefaultChainSpecParser; + use super::*; #[test] fn parse_unwind() { - let cmd = Command::parse_from(["reth", "--datadir", "dir", "to-block", "100"]); + let cmd = Command::::parse_from([ + "reth", + "--datadir", + "dir", + "to-block", + "100", + ]); assert_eq!(cmd.command, Subcommands::ToBlock { target: BlockHashOrNumber::Number(100) }); - let cmd = Command::parse_from(["reth", "--datadir", "dir", "num-blocks", "100"]); + let cmd = Command::::parse_from([ + "reth", + "--datadir", + "dir", + "num-blocks", + "100", + ]); assert_eq!(cmd.command, Subcommands::NumBlocks { amount: 100 }); } } diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index d7138444f6..112685251d 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,9 +1,10 @@ +use alloy_primitives::private::getrandom::getrandom; use arbitrary::Arbitrary; use eyre::Result; use proptest::{ prelude::ProptestConfig, strategy::{Strategy, ValueTree}, - test_runner::TestRunner, + test_runner::{TestRng, TestRunner}, }; use proptest_arbitrary_interop::arb; use reth_db::tables; @@ -17,7 +18,16 @@ const PER_TABLE: usize = 1000; /// Generates test vectors for specified `tables`. If list is empty, then generate for all tables. pub(crate) fn generate_vectors(mut tables: Vec) -> Result<()> { - let mut runner = TestRunner::new(ProptestConfig::default()); + // Prepare random seed for test (same method as used by proptest) + let mut seed = [0u8; 32]; + getrandom(&mut seed)?; + println!("Seed for test vectors: {:?}", seed); + + // Start the runner with the seed + let config = ProptestConfig::default(); + let rng = TestRng::from_seed(config.rng_algorithm, &seed); + let mut runner = TestRunner::new_with_rng(config, rng); + fs::create_dir_all(VECTORS_FOLDER)?; macro_rules! generate_vector { diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index ba09093559..d96a882a67 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -18,12 +18,21 @@ reth-fs-util.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true -secp256k1 = { workspace = true, features = ["rand"] } -rand.workspace = true - # misc -thiserror.workspace = true +cfg-if.workspace = true eyre.workspace = true +rand.workspace = true +secp256k1 = { workspace = true, features = ["rand"] } +thiserror.workspace = true + +tracy-client = { workspace = true, optional = true, features = ["demangle"] } [target.'cfg(unix)'.dependencies] +tikv-jemallocator = { workspace = true, optional = true } libc = "0.2" + +[features] +jemalloc = ["dep:tikv-jemallocator"] +jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] + +tracy-allocator = ["dep:tracy-client"] diff --git a/crates/cli/util/src/allocator.rs b/crates/cli/util/src/allocator.rs new file mode 100644 index 0000000000..b5974e2245 --- /dev/null +++ b/crates/cli/util/src/allocator.rs @@ -0,0 +1,35 @@ +//! Custom allocator implementation. + +// We use jemalloc for performance reasons. +cfg_if::cfg_if! 
+    if #[cfg(all(feature = "jemalloc", unix))] {
+        type AllocatorInner = tikv_jemallocator::Jemalloc;
+    } else {
+        type AllocatorInner = std::alloc::System;
+    }
+}
+
+cfg_if::cfg_if! {
+    if #[cfg(feature = "tracy-allocator")] {
+        type AllocatorWrapper = tracy_client::ProfiledAllocator<AllocatorInner>;
+        const fn new_allocator_wrapper() -> AllocatorWrapper {
+            AllocatorWrapper::new(AllocatorInner {}, 100)
+        }
+    } else {
+        type AllocatorWrapper = AllocatorInner;
+        const fn new_allocator_wrapper() -> AllocatorWrapper {
+            AllocatorInner {}
+        }
+    }
+}
+
+#[cfg(feature = "tracy-allocator")]
+tracy_client::register_demangler!();
+
+/// Custom allocator.
+pub type Allocator = AllocatorWrapper;
+
+/// Creates a new [custom allocator][Allocator].
+pub const fn new_allocator() -> Allocator {
+    new_allocator_wrapper()
+}
diff --git a/crates/cli/util/src/lib.rs b/crates/cli/util/src/lib.rs
index fd43f1b489..a5bc2452fe 100644
--- a/crates/cli/util/src/lib.rs
+++ b/crates/cli/util/src/lib.rs
@@ -8,6 +8,8 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

+pub mod allocator;
+
 /// Helper function to load a secret key from a file.
 pub mod load_secret_key;
 pub use load_secret_key::get_secret_key;
diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml
index bb3276f578..3c14368871 100644
--- a/crates/config/Cargo.toml
+++ b/crates/config/Cargo.toml
@@ -28,8 +28,9 @@ eyre.workspace = true
 [dev-dependencies]
 tempfile.workspace = true
 reth-network-peers.workspace = true
+reth-primitives.workspace = true

 [features]
 bsc = [
     "reth-primitives/bsc"
-]
\ No newline at end of file
+]
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs
index 32620a7793..91f0ef47fc 100644
--- a/crates/config/src/config.rs
+++ b/crates/config/src/config.rs
@@ -399,6 +399,34 @@ impl PruneConfig {
     pub fn has_receipts_pruning(&self) -> bool {
         self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
     }
+
+    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
+    /// if the corresponding value in this config is not set.
+    pub fn merge(&mut self, other: Option<PruneConfig>) {
+        let Some(other) = other else { return };
+
+        // Merge block_interval
+        if self.block_interval == 0 {
+            self.block_interval = other.block_interval;
+        }
+
+        // Merge the various segment prune modes
+        self.segments.sender_recovery =
+            self.segments.sender_recovery.or(other.segments.sender_recovery);
+        self.segments.transaction_lookup =
+            self.segments.transaction_lookup.or(other.segments.transaction_lookup);
+        self.segments.receipts = self.segments.receipts.or(other.segments.receipts);
+        self.segments.account_history =
+            self.segments.account_history.or(other.segments.account_history);
+        self.segments.storage_history =
+            self.segments.storage_history.or(other.segments.storage_history);
+
+        if self.segments.receipts_log_filter.0.is_empty() &&
+            !other.segments.receipts_log_filter.0.is_empty()
+        {
+            self.segments.receipts_log_filter = other.segments.receipts_log_filter;
+        }
+    }
 }

 /// Helper type to support older versions of Duration deserialization.
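// Illustrative sketch (not part of the diff): how the `Option::or`-based
// `merge` above behaves. Locally set values win; unset ones are filled from
// the other config. Assumes `PruneConfig` and `PruneModes` implement
// `Default` (they do in this crate); the concrete values are hypothetical.
use reth_config::PruneConfig;
use reth_prune_types::{PruneMode, PruneModes};

fn merge_example(persisted: PruneConfig) -> PruneConfig {
    let mut local = PruneConfig {
        // 0 is treated as "unset", so the persisted interval is taken.
        block_interval: 0,
        segments: PruneModes {
            // Already set locally: kept as-is, never overwritten.
            receipts: Some(PruneMode::Distance(1_000)),
            ..Default::default()
        },
        ..Default::default()
    };
    local.merge(Some(persisted));
    local
}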
@@ -422,8 +450,11 @@ where #[cfg(test)] mod tests { use super::{Config, EXTENSION}; + use crate::PruneConfig; use reth_network_peers::TrustedPeer; - use std::{path::Path, str::FromStr, time::Duration}; + use reth_primitives::Address; + use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig}; + use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration}; fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); @@ -900,6 +931,54 @@ receipts = 'full' assert!(err.contains("invalid value: string \"full\""), "{}", err); } + #[test] + fn test_prune_config_merge() { + let mut config1 = PruneConfig { + block_interval: 5, + recent_sidecars_kept_blocks: 0, + segments: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + receipts: Some(PruneMode::Distance(1000)), + account_history: None, + storage_history: Some(PruneMode::Before(5000)), + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( + Address::random(), + PruneMode::Full, + )])), + }, + }; + + let config2 = PruneConfig { + block_interval: 10, + recent_sidecars_kept_blocks: 0, + segments: PruneModes { + sender_recovery: Some(PruneMode::Distance(500)), + transaction_lookup: Some(PruneMode::Full), + receipts: Some(PruneMode::Full), + account_history: Some(PruneMode::Distance(2000)), + storage_history: Some(PruneMode::Distance(3000)), + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ + (Address::random(), PruneMode::Distance(1000)), + (Address::random(), PruneMode::Before(2000)), + ])), + }, + }; + + let original_filter = config1.segments.receipts_log_filter.clone(); + config1.merge(Some(config2)); + + // Check that the configuration has been merged. Any configuration present in config1 + // should not be overwritten by config2 + assert_eq!(config1.block_interval, 5); + assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full)); + assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full)); + assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); + assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); + assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); + assert_eq!(config1.segments.receipts_log_filter, original_filter); + } + #[test] fn test_conf_trust_nodes_only() { let trusted_nodes_only = r"# diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 9c4c26c91f..4e3e2db868 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -31,6 +31,9 @@ reth-network-peers.workspace = true reth-tokio-util.workspace = true reth-trie.workspace = true +# ethereum +alloy-primitives.workspace = true + # async futures-util.workspace = true tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index d55cf64431..f9b80f10bb 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -1,6 +1,7 @@ //! This includes download client implementations for auto sealing miners. 
use crate::Storage; +use alloy_primitives::B256; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, @@ -8,7 +9,7 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, B256}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 4b8b18253a..b56851e8e7 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -15,6 +15,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -24,9 +25,8 @@ use reth_execution_errors::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - eip4844::calculate_excess_blob_gas, proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, - BlockNumber, BlockWithSenders, Bloom, Header, Requests, SealedBlock, SealedHeader, - TransactionSigned, Withdrawals, B256, U256, + eip4844::calculate_excess_blob_gas, proofs, Block, BlockBody, BlockHashOrNumber, + BlockWithSenders, Header, Requests, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, }; use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; @@ -426,3 +426,257 @@ impl StorageInner { Ok((new_header, execution_outcome)) } } + +#[cfg(test)] +mod tests { + use reth_chainspec::{ChainHardforks, EthereumHardfork, ForkCondition}; + use reth_primitives::Transaction; + + use super::*; + + #[test] + fn test_block_hash() { + let mut storage = StorageInner::default(); + + // Define two block hashes and their corresponding block numbers. + let block_hash_1: BlockHash = B256::random(); + let block_number_1: BlockNumber = 1; + let block_hash_2: BlockHash = B256::random(); + let block_number_2: BlockNumber = 2; + + // Insert the block number and hash pairs into the `hash_to_number` map. + storage.hash_to_number.insert(block_hash_1, block_number_1); + storage.hash_to_number.insert(block_hash_2, block_number_2); + + // Verify that `block_hash` returns the correct block hash for the given block number. + assert_eq!(storage.block_hash(block_number_1), Some(block_hash_1)); + assert_eq!(storage.block_hash(block_number_2), Some(block_hash_2)); + + // Test that `block_hash` returns `None` for a non-existent block number. + let block_number_3: BlockNumber = 3; + assert_eq!(storage.block_hash(block_number_3), None); + } + + #[test] + fn test_header_by_hash_or_number() { + let mut storage = StorageInner::default(); + + // Define block numbers, headers, and hashes. + let block_number_1: u64 = 1; + let block_number_2: u64 = 2; + let header_1 = Header { number: block_number_1, ..Default::default() }; + let header_2 = Header { number: block_number_2, ..Default::default() }; + let block_hash_1: BlockHash = B256::random(); + let block_hash_2: BlockHash = B256::random(); + + // Insert headers and hash-to-number mappings. 
+ storage.headers.insert(block_number_1, header_1.clone()); + storage.headers.insert(block_number_2, header_2.clone()); + storage.hash_to_number.insert(block_hash_1, block_number_1); + storage.hash_to_number.insert(block_hash_2, block_number_2); + + // Test header retrieval by block number. + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_1)), + Some(header_1.clone()) + ); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_2)), + Some(header_2.clone()) + ); + + // Test header retrieval by block hash. + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_1)), + Some(header_1) + ); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_2)), + Some(header_2) + ); + + // Test non-existent block number and hash. + assert_eq!(storage.header_by_hash_or_number(BlockHashOrNumber::Number(999)), None); + let non_existent_hash: BlockHash = B256::random(); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(non_existent_hash)), + None + ); + } + + #[test] + fn test_insert_new_block() { + let mut storage = StorageInner::default(); + + // Define headers and block bodies. + let header_1 = Header { difficulty: U256::from(100), ..Default::default() }; + let body_1 = BlockBody::default(); + let header_2 = Header { difficulty: U256::from(200), ..Default::default() }; + let body_2 = BlockBody::default(); + + // Insert the first block. + storage.insert_new_block(header_1.clone(), body_1.clone()); + let best_block_1 = storage.best_block; + let best_hash_1 = storage.best_hash; + + // Verify the block was inserted correctly. + assert_eq!( + storage.headers.get(&best_block_1), + Some(&Header { number: 1, ..header_1.clone() }) + ); + assert_eq!(storage.bodies.get(&best_hash_1), Some(&body_1)); + assert_eq!(storage.hash_to_number.get(&best_hash_1), Some(&best_block_1)); + + // Insert the second block. + storage.insert_new_block(header_2.clone(), body_2.clone()); + let best_block_2 = storage.best_block; + let best_hash_2 = storage.best_hash; + + // Verify the second block was inserted correctly. + assert_eq!( + storage.headers.get(&best_block_2), + Some(&Header { + number: 2, + parent_hash: Header { number: 1, ..header_1 }.hash_slow(), + ..header_2 + }) + ); + assert_eq!(storage.bodies.get(&best_hash_2), Some(&body_2)); + assert_eq!(storage.hash_to_number.get(&best_hash_2), Some(&best_block_2)); + + // Check that the total difficulty was updated. 
+ assert_eq!(storage.total_difficulty, header_1.difficulty + header_2.difficulty); + } + + #[test] + fn test_build_basic_header_template() { + let mut storage = StorageInner::default(); + let chain_spec = ChainSpec::default(); + + let best_block_number = 1; + let best_block_hash = B256::random(); + let timestamp = 1_600_000_000; + + // Set up best block information + storage.best_block = best_block_number; + storage.best_hash = best_block_hash; + + // Build header template + let header = storage.build_header_template( + timestamp, + &[], // no transactions + &[], // no ommers + None, // no withdrawals + None, // no requests + &chain_spec, + ); + + // Verify basic fields + assert_eq!(header.parent_hash, best_block_hash); + assert_eq!(header.number, best_block_number + 1); + assert_eq!(header.timestamp, timestamp); + assert_eq!(header.gas_limit, chain_spec.max_gas_limit); + } + + #[test] + fn test_ommers_and_transactions_roots() { + let storage = StorageInner::default(); + let chain_spec = ChainSpec::default(); + let timestamp = 1_600_000_000; + + // Setup ommers and transactions + let ommers = vec![Header::default()]; + let transactions = vec![TransactionSigned::default()]; + + // Build header template + let header = storage.build_header_template( + timestamp, + &transactions, + &ommers, + None, // no withdrawals + None, // no requests + &chain_spec, + ); + + // Verify ommers and transactions roots + assert_eq!(header.ommers_hash, proofs::calculate_ommers_root(&ommers)); + assert_eq!(header.transactions_root, proofs::calculate_transaction_root(&transactions)); + } + + // Test base fee calculation from the parent block + #[test] + fn test_base_fee_calculation() { + let mut storage = StorageInner::default(); + let chain_spec = ChainSpec::default(); + let timestamp = 1_600_000_000; + + // Set up the parent header with base fee + let base_fee = Some(100); + let parent_header = Header { base_fee_per_gas: base_fee, ..Default::default() }; + storage.headers.insert(storage.best_block, parent_header); + + // Build header template + let header = storage.build_header_template( + timestamp, + &[], // no transactions + &[], // no ommers + None, // no withdrawals + None, // no requests + &chain_spec, + ); + + // Verify base fee is correctly propagated + assert_eq!(header.base_fee_per_gas, base_fee); + } + + // Test blob gas and excess blob gas calculation when Cancun is active + #[test] + fn test_blob_gas_calculation_cancun() { + let storage = StorageInner::default(); + let chain_spec = ChainSpec { + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Cancun.boxed(), + ForkCondition::Timestamp(25), + )]), + ..Default::default() + }; + let timestamp = 26; + + // Set up a transaction with blob gas + let blob_tx = TransactionSigned { + transaction: Transaction::Eip4844(Default::default()), + ..Default::default() + }; + let transactions = vec![blob_tx]; + + // Build header template + let header = storage.build_header_template( + timestamp, + &transactions, + &[], // no ommers + None, // no withdrawals + None, // no requests + &chain_spec, + ); + + // Verify that the header has the correct fields including blob gas + assert_eq!( + header, + Header { + parent_hash: B256::ZERO, + ommers_hash: proofs::calculate_ommers_root(&[]), + transactions_root: proofs::calculate_transaction_root(&transactions), + withdrawals_root: None, + difficulty: U256::from(2), + number: 1, + gas_limit: chain_spec.max_gas_limit, + timestamp, + base_fee_per_gas: None, + blob_gas_used: Some(0), + requests_root: None, + 
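+            // Note (not part of the diff): with a default parent header, both
+            // parent.blob_gas_used and parent.excess_blob_gas are zero, and EIP-4844's
+            // excess_blob_gas = max(parent_excess + parent_used - TARGET_BLOB_GAS_PER_BLOCK, 0)
+            // evaluates to 0, which is why this first post-Cancun header carries
+            // Some(0) in both blob gas fields.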
excess_blob_gas: Some(0), + ..Default::default() + } + ); + } +} diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs index 2ff918af63..82750c8e47 100644 --- a/crates/consensus/auto-seal/src/mode.rs +++ b/crates/consensus/auto-seal/src/mode.rs @@ -1,7 +1,7 @@ //! The mode the auto seal miner is operating in. +use alloy_primitives::TxHash; use futures_util::{stream::Fuse, StreamExt}; -use reth_primitives::TxHash; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ fmt, diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index f19e43deaf..d9067b2af8 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -12,13 +12,11 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true reth-primitives.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true -reth-db-api.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-tasks.workspace = true @@ -30,8 +28,11 @@ reth-static-file.workspace = true reth-tokio-util.workspace = true reth-engine-primitives.workspace = true reth-network-p2p.workspace = true +reth-node-types.workspace = true reth-bsc-consensus = { workspace = true, optional = true } +# ethereum +alloy-primitives.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -56,6 +57,7 @@ reth-consensus = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } +reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-evm = { workspace = true, features = ["test-utils"] } reth-network-p2p = { workspace = true, features = ["test-utils"] } @@ -68,6 +70,7 @@ reth-config.workspace = true reth-testing-utils.workspace = true reth-exex-types.workspace = true reth-prune-types.workspace = true +reth-chainspec.workspace = true alloy-genesis.workspace = true assert_matches.workspace = true @@ -79,5 +82,5 @@ optimism = [ "reth-blockchain-tree/optimism", ] bsc = [ - "reth-bsc-consensus/bsc" + "reth-bsc-consensus/bsc", ] \ No newline at end of file diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 4561f2e5ef..994cda060c 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,5 +1,6 @@ use crate::engine::forkchoice::ForkchoiceStatus; -use reth_primitives::{SealedBlock, SealedHeader, B256}; +use alloy_primitives::B256; +use reth_primitives::{SealedBlock, SealedHeader}; use reth_rpc_types::engine::ForkchoiceState; use std::{sync::Arc, time::Duration}; @@ -8,14 +9,14 @@ use std::{sync::Arc, time::Duration}; pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), + /// A block was added to the fork chain. 
+    ForkBlockAdded(Arc<SealedBlock>, Duration),
     /// A block was added to the canonical chain, and the elapsed time validating the block
     CanonicalBlockAdded(Arc<SealedBlock>, Duration),
     /// A canonical chain was committed, and the elapsed time committing the data
     CanonicalChainCommitted(Box<SealedHeader>, Duration),
     /// The consensus engine is involved in live sync, and has specific progress
     LiveSyncProgress(ConsensusEngineLiveSyncProgress),
-    /// A block was added to the fork chain.
-    ForkBlockAdded(Arc<SealedBlock>),
 }

 impl BeaconConsensusEngineEvent {
diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs
index d8b3344f6d..f02b120033 100644
--- a/crates/consensus/beacon/src/engine/forkchoice.rs
+++ b/crates/consensus/beacon/src/engine/forkchoice.rs
@@ -1,4 +1,4 @@
-use reth_primitives::B256;
+use alloy_primitives::B256;
 use reth_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};

 /// The struct that keeps track of the received forkchoice state and their status.
@@ -75,11 +75,55 @@ impl ForkchoiceStateTracker {
         self.last_syncing.as_ref().map(|s| s.head_block_hash)
     }

+    /// Returns the latest received `ForkchoiceState`.
+    ///
+    /// Caution: this can be invalid.
+    pub const fn latest_state(&self) -> Option<ForkchoiceState> {
+        self.last_valid
+    }
+
+    /// Returns the last valid `ForkchoiceState`.
+    pub const fn last_valid_state(&self) -> Option<ForkchoiceState> {
+        self.last_valid
+    }
+
+    /// Returns the last valid finalized hash.
+    ///
+    /// This will return [`None`], if either there is no valid finalized forkchoice state, or the
+    /// finalized hash for the latest valid forkchoice state is zero.
+    #[inline]
+    pub fn last_valid_finalized(&self) -> Option<B256> {
+        self.last_valid.and_then(|state| {
+            // if the hash is zero then we should act like there is no finalized hash
+            if state.finalized_block_hash.is_zero() {
+                None
+            } else {
+                Some(state.finalized_block_hash)
+            }
+        })
+    }
+
     /// Returns the last received `ForkchoiceState` to which we need to sync.
     pub const fn sync_target_state(&self) -> Option<ForkchoiceState> {
         self.last_syncing
     }

+    /// Returns the sync target finalized hash.
+    ///
+    /// This will return [`None`], if either there is no sync target forkchoice state, or the
+    /// finalized hash for the sync target forkchoice state is zero.
+    #[inline]
+    pub fn sync_target_finalized(&self) -> Option<B256> {
+        self.last_syncing.and_then(|state| {
+            // if the hash is zero then we should act like there is no finalized hash
+            if state.finalized_block_hash.is_zero() {
+                None
+            } else {
+                Some(state.finalized_block_hash)
+            }
+        })
+    }
+
     /// Returns true if no forkchoice state has been received yet.
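// Illustrative sketch (not part of the diff). The two `*_finalized` helpers
// added above encode the Engine API convention that a zero finalized-block
// hash means "no finalized block yet":
//
//   let mut tracker = ForkchoiceStateTracker::default();
//   tracker.set_latest(state_with_zero_finalized_hash, ForkchoiceStatus::Syncing);
//   assert_eq!(tracker.sync_target_finalized(), None); // zero hash filtered out
//
// `set_latest` stands in for however the tracker is fed updates in this
// module; treat the exact call shape as an assumption.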
pub const fn is_empty(&self) -> bool { self.latest.is_none() diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs index 5d699921a6..b5e6ea61e3 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -1,5 +1,5 @@ +use alloy_primitives::BlockNumber; use reth_errors::{RethError, RethResult}; -use reth_primitives::BlockNumber; use std::{ fmt, task::{Context, Poll}, diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index 16b796dd86..409fc98b80 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -4,16 +4,15 @@ use crate::{ engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, hooks::EngineHookDBAccessLevel, }; +use alloy_primitives::BlockNumber; use futures::FutureExt; use metrics::Counter; -use reth_db_api::database::Database; use reth_errors::{RethError, RethResult}; -use reth_primitives::BlockNumber; -use reth_provider::ProviderFactory; +use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter}; use reth_prune::{Pruner, PrunerError, PrunerWithResult}; use reth_tasks::TaskSpawner; use std::{ - fmt, + fmt::{self, Debug}, task::{ready, Context, Poll}, }; use tokio::sync::oneshot; @@ -21,15 +20,18 @@ use tokio::sync::oneshot; /// Manages pruning under the control of the engine. /// /// This type controls the [Pruner]. -pub struct PruneHook { +pub struct PruneHook { /// The current state of the pruner. - pruner_state: PrunerState, + pruner_state: PrunerState, /// The type that can spawn the pruner task. pruner_task_spawner: Box, metrics: Metrics, } -impl fmt::Debug for PruneHook { +impl fmt::Debug for PruneHook +where + PF: DatabaseProviderFactory + fmt::Debug, +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PruneHook") .field("pruner_state", &self.pruner_state) @@ -38,10 +40,10 @@ impl fmt::Debug for PruneHook { } } -impl PruneHook { +impl PruneHook { /// Create a new instance pub fn new( - pruner: Pruner>, + pruner: Pruner, pruner_task_spawner: Box, ) -> Self { Self { @@ -79,7 +81,13 @@ impl PruneHook { Poll::Ready(Ok(event)) } +} +impl PruneHook +where + PF: DatabaseProviderFactory + + 'static, +{ /// This will try to spawn the pruner if it is idle: /// 1. Check if pruning is needed through [`Pruner::is_pruning_needed`]. /// @@ -117,7 +125,11 @@ impl PruneHook { } } -impl EngineHook for PruneHook { +impl EngineHook for PruneHook +where + PF: DatabaseProviderFactory + + 'static, +{ fn name(&self) -> &'static str { "Prune" } @@ -152,12 +164,23 @@ impl EngineHook for PruneHook { /// running, it acquires the write lock over the database. This means that we cannot forward to the /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. -#[derive(Debug)] -enum PrunerState { +enum PrunerState { /// Pruner is idle. 
- Idle(Option>>), + Idle(Option>), /// Pruner is running and waiting for a response - Running(oneshot::Receiver>>), + Running(oneshot::Receiver>), +} + +impl fmt::Debug for PrunerState +where + PF: DatabaseProviderFactory + Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Idle(f0) => f.debug_tuple("Idle").field(&f0).finish(), + Self::Running(f0) => f.debug_tuple("Running").field(&f0).finish(), + } + } } #[derive(reth_metrics::Metrics)] diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 54dd1ccb9a..c6b61d3462 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -4,10 +4,13 @@ use crate::{ engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, hooks::EngineHookDBAccessLevel, }; +use alloy_primitives::BlockNumber; use futures::FutureExt; -use reth_db_api::database::Database; use reth_errors::RethResult; -use reth_primitives::{static_file::HighestStaticFiles, BlockNumber}; +use reth_primitives::static_file::HighestStaticFiles; +use reth_provider::{ + BlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, +}; use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; use reth_tasks::TaskSpawner; use std::task::{ready, Context, Poll}; @@ -18,17 +21,22 @@ use tracing::trace; /// /// This type controls the [`StaticFileProducer`]. #[derive(Debug)] -pub struct StaticFileHook { +pub struct StaticFileHook { /// The current state of the `static_file_producer`. - state: StaticFileProducerState, + state: StaticFileProducerState, /// The type that can spawn the `static_file_producer` task. task_spawner: Box, } -impl StaticFileHook { +impl StaticFileHook +where + Provider: StaticFileProviderFactory + + DatabaseProviderFactory + + 'static, +{ /// Create a new instance pub fn new( - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer, task_spawner: Box, ) -> Self { Self { state: StaticFileProducerState::Idle(Some(static_file_producer)), task_spawner } @@ -127,7 +135,12 @@ impl StaticFileHook { } } -impl EngineHook for StaticFileHook { +impl EngineHook for StaticFileHook +where + Provider: StaticFileProviderFactory + + DatabaseProviderFactory + + 'static, +{ fn name(&self) -> &'static str { "StaticFile" } @@ -172,9 +185,9 @@ impl EngineHook for StaticFileHook { /// [`StaticFileProducerState::Idle`] means that the static file producer is currently idle. /// [`StaticFileProducerState::Running`] means that the static file producer is currently running. #[derive(Debug)] -enum StaticFileProducerState { +enum StaticFileProducerState { /// [`StaticFileProducer`] is idle. 
- Idle(Option>), + Idle(Option>), /// [`StaticFileProducer`] is running and waiting for a response - Running(oneshot::Receiver>), + Running(oneshot::Receiver>), } diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index b93138901d..ff5ede5a7a 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,8 +1,9 @@ +use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::{Header, SealedHeader, B256}; +use reth_primitives::{Header, SealedHeader}; use schnellru::{ByLength, LruMap}; use std::sync::Arc; use tracing::warn; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index f58f620b44..04f79b2d4d 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -8,6 +8,7 @@ use reth_rpc_types::engine::{ ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use std::{ + fmt::Display, future::Future, pin::Pin, task::{ready, Context, Poll}, @@ -160,3 +161,31 @@ pub enum BeaconEngineMessage { /// Message with exchanged transition configuration. TransitionConfigurationExchanged, } + +impl Display for BeaconEngineMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NewPayload { payload, .. } => { + write!( + f, + "NewPayload(parent: {}, number: {}, hash: {})", + payload.parent_hash(), + payload.block_number(), + payload.block_hash() + ) + } + Self::ForkchoiceUpdated { state, payload_attrs, .. } => { + // we don't want to print the entire payload attributes, because for OP this + // includes all txs + write!( + f, + "ForkchoiceUpdated {{ state: {state:?}, has_payload_attributes: {} }}", + payload_attrs.is_some() + ) + } + Self::TransitionConfigurationExchanged => { + write!(f, "TransitionConfigurationExchanged") + } + } + } +} diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 97a6854713..61a8c7c68b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,27 +1,26 @@ +use alloy_primitives::{BlockNumber, B256}; use futures::{stream::BoxStream, Future, StreamExt}; use itertools::Either; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_chainspec::ChainSpec; -use reth_db_api::database::Database; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineTypes, PayloadTypes}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, BlockClient, }; +use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - constants::EPOCH_SLOTS, BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, - B256, + constants::EPOCH_SLOTS, BlockNumHash, Head, Header, SealedBlock, SealedHeader, }; use reth_provider::{ - BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, - StageCheckpointReader, + providers::ProviderNodeTypes, 
BlockIdReader, BlockReader, BlockSource, CanonChainTracker, + ChainSpecProvider, ProviderError, StageCheckpointReader, }; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, @@ -88,6 +87,11 @@ const MAX_INVALID_HEADERS: u32 = 512u32; /// If the distance exceeds this threshold, the pipeline will be used for sync. pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; +/// Helper trait expressing requirements for node types to be used in engine. +pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} + +impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} + /// Represents a pending forkchoice update. /// /// This type encapsulates the necessary components for a pending forkchoice update @@ -168,40 +172,40 @@ type PendingForkchoiceUpdate = /// If the future is polled more than once. Leads to undefined state. #[must_use = "Future does nothing unless polled"] #[allow(missing_debug_implementations)] -pub struct BeaconConsensusEngine +pub struct BeaconConsensusEngine where - DB: Database, + N: EngineNodeTypes, Client: BlockClient, BT: BlockchainTreeEngine + BlockReader + BlockIdReader + CanonChainTracker + StageCheckpointReader, - EngineT: EngineTypes, { /// Controls syncing triggered by engine updates. - sync: EngineSyncController, + sync: EngineSyncController, /// The type we can use to query both the database and the blockchain tree. blockchain: BT, /// Used for emitting updates about whether the engine is syncing or not. sync_state_updater: Box, /// The Engine API message receiver. - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, /// A clone of the handle - handle: BeaconConsensusEngineHandle, + handle: BeaconConsensusEngineHandle, /// Tracks the received forkchoice state updates received by the CL. forkchoice_state_tracker: ForkchoiceStateTracker, /// The payload store. - payload_builder: PayloadBuilderHandle, + payload_builder: PayloadBuilderHandle, /// Validator for execution payloads payload_validator: ExecutionPayloadValidator, /// Current blockchain tree action. - blockchain_tree_action: Option>, + blockchain_tree_action: Option>, /// Pending forkchoice update. /// It is recorded if we cannot process the forkchoice update because /// a hook with database read-write access is active. /// This is a temporary solution to always process missed FCUs. - pending_forkchoice_update: Option>, + pending_forkchoice_update: + Option::PayloadAttributes>>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, @@ -224,33 +228,32 @@ where metrics: EngineMetrics, } -impl BeaconConsensusEngine +impl BeaconConsensusEngine where - DB: Database + Unpin + 'static, + N: EngineNodeTypes, BT: BlockchainTreeEngine + BlockReader + BlockIdReader + CanonChainTracker + StageCheckpointReader - + ChainSpecProvider + + ChainSpecProvider + 'static, Client: BlockClient + 'static, - EngineT: EngineTypes + Unpin, { /// Create a new instance of the [`BeaconConsensusEngine`]. 
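// Note (not part of the diff): `EngineNodeTypes` above is a pure alias trait;
// the blanket impl makes every `N: ProviderNodeTypes + NodeTypesWithEngine`
// satisfy it automatically, so the engine's many signatures can carry a
// single bound instead of two, e.g. (hypothetical function):
//
//   fn spawn_engine<N: EngineNodeTypes>(/* components generic over N */) {}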
#[allow(clippy::too_many_arguments)] pub fn new( client: Client, - pipeline: Pipeline, + pipeline: Pipeline, blockchain: BT, task_spawner: Box, sync_state_updater: Box, max_block: Option, - payload_builder: PayloadBuilderHandle, + payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { + ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { let (to_engine, rx) = mpsc::unbounded_channel(); Self::with_channel( client, @@ -284,18 +287,18 @@ where #[allow(clippy::too_many_arguments)] pub fn with_channel( client: Client, - pipeline: Pipeline, + pipeline: Pipeline, blockchain: BT, task_spawner: Box, sync_state_updater: Box, max_block: Option, - payload_builder: PayloadBuilderHandle, + payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, - to_engine: UnboundedSender>, - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, + to_engine: UnboundedSender>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { + ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { let event_sender = EventSender::default(); let handle = BeaconConsensusEngineHandle::new(to_engine, event_sender.clone()); let sync = EngineSyncController::new( @@ -349,7 +352,7 @@ where } /// Set the next blockchain tree action. - fn set_blockchain_tree_action(&mut self, action: BlockchainTreeAction) { + fn set_blockchain_tree_action(&mut self, action: BlockchainTreeAction) { let previous_action = self.blockchain_tree_action.replace(action); debug_assert!(previous_action.is_none(), "Pre-existing action found"); } @@ -391,7 +394,7 @@ where fn on_forkchoice_updated_make_canonical_result( &mut self, state: ForkchoiceState, - mut attrs: Option, + mut attrs: Option<::PayloadAttributes>, make_canonical_result: Result, elapsed: Duration, ) -> Result { @@ -455,7 +458,7 @@ where &self, head: &BlockNumHash, header: &SealedHeader, - attrs: &mut Option, + attrs: &mut Option<::PayloadAttributes>, ) -> bool { // On Optimism, the proposers are allowed to reorg their own chain at will. #[cfg(feature = "optimism")] @@ -499,7 +502,7 @@ where fn on_forkchoice_updated( &mut self, state: ForkchoiceState, - attrs: Option, + attrs: Option<::PayloadAttributes>, tx: oneshot::Sender>, ) { self.metrics.forkchoice_updated_messages.increment(1); @@ -621,7 +624,7 @@ where /// /// The [`BeaconConsensusEngineHandle`] can be used to interact with this /// [`BeaconConsensusEngine`] - pub fn handle(&self) -> BeaconConsensusEngineHandle { + pub fn handle(&self) -> BeaconConsensusEngineHandle { self.handle.clone() } @@ -1157,7 +1160,7 @@ where /// return an error if the payload attributes are invalid. fn process_payload_attributes( &self, - attrs: EngineT::PayloadAttributes, + attrs: ::PayloadAttributes, head: Header, state: ForkchoiceState, ) -> OnForkChoiceUpdated { @@ -1174,7 +1177,7 @@ where // forkchoiceState.headBlockHash and identified via buildProcessId value if // payloadAttributes is not null and the forkchoice state has been updated successfully. // The build process is specified in the Payload building section. 
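// Note (not part of the diff): the replacement below rewrites this call in
// fully qualified syntax. The attributes type is now reached through the node
// type (`N` -> `NodeTypesWithEngine::Engine` -> `PayloadTypes::PayloadBuilderAttributes`),
// and plain method syntax cannot name that associated-type chain on its own.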
- match ::try_new( + match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( state.head_block_hash, attrs, ) { @@ -1246,7 +1249,7 @@ where let event = if attachment.is_canonical() { BeaconConsensusEngineEvent::CanonicalBlockAdded(block, elapsed) } else { - BeaconConsensusEngineEvent::ForkBlockAdded(block) + BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed) }; self.event_sender.notify(event); PayloadStatusEnum::Valid @@ -1596,7 +1599,7 @@ where /// so the state change should be handled accordingly. fn on_blockchain_tree_action( &mut self, - action: BlockchainTreeAction, + action: BlockchainTreeAction, ) -> RethResult { match action { BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { @@ -1798,19 +1801,18 @@ where /// local forkchoice state, it will launch the pipeline to sync to the head hash. /// While the pipeline is syncing, the consensus engine will keep processing messages from the /// receiver and forwarding them to the blockchain tree. -impl Future for BeaconConsensusEngine +impl Future for BeaconConsensusEngine where - DB: Database + Unpin + 'static, + N: EngineNodeTypes, Client: BlockClient + 'static, BT: BlockchainTreeEngine + BlockReader + BlockIdReader + CanonChainTracker + StageCheckpointReader - + ChainSpecProvider + + ChainSpecProvider + Unpin + 'static, - EngineT: EngineTypes + Unpin, { type Output = Result<(), BeaconConsensusEngineError>; @@ -2165,8 +2167,8 @@ mod tests { assert_matches!(rx.await, Ok(Ok(()))); } - fn insert_blocks<'a, DB: Database>( - provider_factory: ProviderFactory, + fn insert_blocks<'a, N: ProviderNodeTypes>( + provider_factory: ProviderFactory, mut blocks: impl Iterator, ) { let provider = provider_factory.provider_rw().unwrap(); @@ -2184,11 +2186,11 @@ mod tests { mod fork_choice_updated { use super::*; + use alloy_primitives::U256; use generators::BlockParams; - use reth_db::{tables, test_utils::create_test_static_files_dir}; + use reth_db::{tables, test_utils::create_test_static_files_dir, Database}; use reth_db_api::transaction::DbTxMut; - use reth_primitives::U256; - use reth_provider::providers::StaticFileProvider; + use reth_provider::{providers::StaticFileProvider, test_utils::MockNodeTypesWithDB}; use reth_rpc_types::engine::ForkchoiceUpdateError; use reth_testing_utils::generators::random_block; @@ -2257,8 +2259,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2325,8 +2327,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2356,8 +2358,8 @@ mod tests { // Insert next head immediately after sending forkchoice update insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2412,8 +2414,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2495,8 +2497,8 @@ mod tests { let (_static_dir, static_dir_path) = 
create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2556,8 +2558,8 @@ mod tests { let (_temp_dir, temp_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(temp_dir_path).unwrap(), ), @@ -2585,11 +2587,13 @@ mod tests { mod new_payload { use super::*; use alloy_genesis::Genesis; + use alloy_primitives::U256; use generators::BlockParams; use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::{EthereumHardfork, U256}; + use reth_primitives::EthereumHardfork; use reth_provider::{ - providers::StaticFileProvider, test_utils::blocks::BlockchainTestData, + providers::StaticFileProvider, + test_utils::{blocks::BlockchainTestData, MockNodeTypesWithDB}, }; use reth_testing_utils::{generators::random_block, GenesisAllocator}; #[tokio::test] @@ -2689,8 +2693,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2769,8 +2773,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2819,8 +2823,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), @@ -2890,8 +2894,8 @@ mod tests { let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( - ProviderFactory::new( - env.db.as_ref(), + ProviderFactory::::new( + env.db.clone(), chain_spec.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), ), diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 1be70a5c43..d5fccf73a5 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -5,16 +5,17 @@ use crate::EthBeaconConsensus; use crate::{ engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, }; +use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; #[cfg(feature = "bsc")] use reth_bsc_consensus::Parlia; -use reth_chainspec::ChainSpec; -use reth_db_api::database::Database; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, BlockClient, }; -use reth_primitives::{BlockNumber, SealedBlock, B256}; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::SealedBlock; +use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventSender; @@ -35,9 +36,9 @@ use tracing::trace; /// Caution: If the pipeline is running, this type will not emit blocks downloaded from the network /// [`EngineSyncEvent::FetchedFullBlock`] until the pipeline is idle to prevent commits to the /// database while the pipeline is still active. 
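// Illustrative sketch (not part of the diff). The caution in the doc comment
// above amounts to gating event emission on pipeline idleness, roughly:
//
//   if self.pipeline_state.is_idle() {
//       // safe: the pipeline does not hold the database write lock
//       emit(EngineSyncEvent::FetchedFullBlock(block));
//   } else {
//       // hold the block back until the pipeline finishes
//       self.range_buffered_blocks.push(block);
//   }
//
// `emit` is shorthand and the buffering detail is an assumption;
// `pipeline_state` follows the struct below.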
-pub(crate) struct EngineSyncController +pub(crate) struct EngineSyncController where - DB: Database, + N: NodeTypesWithDB, Client: BlockClient, { /// A downloader that can download full blocks from the network. @@ -46,7 +47,7 @@ where pipeline_task_spawner: Box, /// The current state of the pipeline. /// The pipeline is used for large ranges. - pipeline_state: PipelineState, + pipeline_state: PipelineState, /// Pending target block for the pipeline to sync pending_pipeline_target: Option, /// In-flight full block requests in progress. @@ -65,18 +66,18 @@ where metrics: EngineSyncMetrics, } -impl EngineSyncController +impl EngineSyncController where - DB: Database + 'static, + N: ProviderNodeTypes, Client: BlockClient + 'static, { /// Create a new instance pub(crate) fn new( - pipeline: Pipeline, + pipeline: Pipeline, client: Client, pipeline_task_spawner: Box, max_block: Option, - chain_spec: Arc, + chain_spec: Arc, event_sender: EventSender, ) -> Self { #[cfg(not(feature = "bsc"))] @@ -401,14 +402,14 @@ pub(crate) enum EngineSyncEvent { /// running, it acquires the write lock over the database. This means that we cannot forward to the /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. -enum PipelineState { +enum PipelineState { /// Pipeline is idle. - Idle(Option>), + Idle(Option>), /// Pipeline is running and waiting for a response - Running(oneshot::Receiver>), + Running(oneshot::Receiver>), } -impl PipelineState { +impl PipelineState { /// Returns `true` if the state matches idle. const fn is_idle(&self) -> bool { matches!(self, Self::Idle(_)) @@ -420,12 +421,12 @@ mod tests { use super::*; use assert_matches::assert_matches; use futures::poll; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; + use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; use reth_primitives::{BlockBody, Header, SealedHeader}; use reth_provider::{ - test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome, + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ExecutionOutcome, }; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; @@ -475,12 +476,12 @@ mod tests { } /// Builds the pipeline. - fn build(self, chain_spec: Arc) -> Pipeline>> { + fn build(self, chain_spec: Arc) -> Pipeline { reth_tracing::init_test_tracing(); // Setup pipeline let (tip_tx, _tip_rx) = watch::channel(B256::default()); - let mut pipeline = Pipeline::builder() + let mut pipeline = Pipeline::::builder() .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) .with_tip_sender(tip_tx); @@ -522,13 +523,13 @@ mod tests { } /// Builds the sync controller. 
- fn build( + fn build( self, - pipeline: Pipeline, + pipeline: Pipeline, chain_spec: Arc, - ) -> EngineSyncController> + ) -> EngineSyncController> where - DB: Database + 'static, + N: ProviderNodeTypes, Client: BlockClient + 'static, { let client = self diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index e6f5cd080c..737fdce0de 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -3,6 +3,7 @@ use crate::{ BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; +use alloy_primitives::{BlockNumber, B256}; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; @@ -20,10 +21,10 @@ use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::{sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, BlockClient}; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::{BlockNumber, B256}; use reth_provider::{ - providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, - ExecutionOutcome, ProviderFactory, + providers::BlockchainProvider, + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ExecutionOutcome, }; use reth_prune::Pruner; use reth_prune_types::PruneModes; @@ -39,10 +40,9 @@ use tokio::sync::{oneshot, watch}; type DatabaseEnv = TempDatabase; type TestBeaconConsensusEngine = BeaconConsensusEngine< - Arc, - BlockchainProvider>, + MockNodeTypesWithDB, + BlockchainProvider, Arc>, - EthEngineTypes, >; #[derive(Debug)] @@ -355,7 +355,7 @@ where // Setup pipeline let (tip_tx, tip_rx) = watch::channel(B256::default()); let mut pipeline = match self.base_config.pipeline_config { - TestPipelineConfig::Test(outputs) => Pipeline::builder() + TestPipelineConfig::Test(outputs) => Pipeline::::builder() .add_stages(TestStages::new(outputs, Default::default())) .with_tip_sender(tip_tx), TestPipelineConfig::Real => { @@ -367,7 +367,7 @@ where .build(client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); - Pipeline::builder().add_stages(DefaultStages::new( + Pipeline::::builder().add_stages(DefaultStages::new( provider_factory.clone(), tip_rx.clone(), Arc::clone(&consensus), @@ -390,19 +390,15 @@ where // Setup blockchain tree let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let tree = Arc::new(ShareableBlockchainTree::new( - BlockchainTree::new( - externals, - BlockchainTreeConfig::new(1, 2, 3, 2), - PruneModes::default(), - ) - .expect("failed to create tree"), + BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) + .expect("failed to create tree"), )); - let genesis_block = self.base_config.chain_spec.genesis_header().seal_slow(); + let genesis_block = self.base_config.chain_spec.genesis_header().clone().seal_slow(); let blockchain_provider = BlockchainProvider::with_blocks(provider_factory.clone(), tree, genesis_block, None); - let pruner = Pruner::<_, ProviderFactory<_>>::new( + let pruner = Pruner::new_with_factory( provider_factory.clone(), vec![], 5, diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 8a8cad1943..1f2a43f926 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -16,6 
+16,9 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-consensus.workspace = true +# ethereum +alloy-primitives.workspace = true + [dev-dependencies] reth-storage-api.workspace = true rand.workspace = true diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index e4b2abc13b..88fd10f177 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{ChainSpec, EthereumHardfork}; -use reth_primitives::{constants::ETH_TO_WEI, BlockNumber, U256}; +use reth_primitives::constants::ETH_TO_WEI; /// Calculates the base block reward. /// @@ -57,7 +58,7 @@ pub fn base_block_reward_pre_merge(chain_spec: &ChainSpec, block_number: BlockNu /// # use reth_chainspec::MAINNET; /// # use reth_consensus_common::calc::{base_block_reward, block_reward}; /// # use reth_primitives::constants::ETH_TO_WEI; -/// # use reth_primitives::U256; +/// # use alloy_primitives::U256; /// # /// // This is block 126 on mainnet. /// let block_number = 126; diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 9a81ae1bf8..99627fde8d 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -300,13 +300,13 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, U256}; use mockall::mock; use rand::Rng; use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ - hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, - BlockNumber, Bytes, Signature, Transaction, TransactionSigned, TxEip4844, Withdrawal, - Withdrawals, U256, + proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, + TxEip4844, Withdrawal, Withdrawals, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, @@ -419,7 +419,6 @@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - placeholder: Some(()), to: Address::default(), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 8e7ecbb688..660b43865e 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -14,6 +14,9 @@ workspace = true # reth reth-primitives.workspace = true +# ethereum +alloy-primitives.workspace = true + # misc auto_impl.workspace = true derive_more.workspace = true diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 0528dce701..14da889c1a 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -9,19 +9,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, - GotExpectedBoxed, Header, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, - B256, U256, -}; - -#[cfg(feature = "std")] -use std::fmt::Debug; - -#[cfg(not(feature = "std"))] extern crate alloc; -#[cfg(not(feature = "std"))] + use alloc::{fmt::Debug, vec::Vec}; +use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; +use reth_primitives::{ + constants::MINIMUM_GAS_LIMIT, BlockWithSenders, 
GotExpected, GotExpectedBoxed, Header, + InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, +}; /// A consensus implementation that does nothing. pub mod noop; @@ -127,7 +122,7 @@ pub trait Consensus: Debug + Send + Sync { } /// Consensus Errors -#[derive(Debug, PartialEq, Eq, Clone, derive_more::Display)] +#[derive(Debug, PartialEq, Eq, Clone, derive_more::Display, derive_more::Error)] pub enum ConsensusError { /// Error when the gas used in the header exceeds the gas limit. #[display("block used gas ({gas_used}) is greater than gas limit ({gas_limit})")] @@ -419,16 +414,6 @@ pub enum ConsensusError { }, } -#[cfg(feature = "std")] -impl std::error::Error for ConsensusError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::InvalidTransaction(err) => std::error::Error::source(err), - _ => Option::None, - } - } -} - impl ConsensusError { /// Returns `true` if the error is a state root error. pub const fn is_state_root_error(&self) -> bool { @@ -443,9 +428,6 @@ impl From for ConsensusError { } /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. -#[derive(derive_more::Display, Debug)] +#[derive(derive_more::Display, derive_more::Error, Debug)] #[display("Consensus error: {_0}, Invalid header: {_1:?}")] pub struct HeaderConsensusError(ConsensusError, SealedHeader); - -#[cfg(feature = "std")] -impl std::error::Error for HeaderConsensusError {} diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 31c168eb0c..53bdb72afb 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,5 +1,6 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader, U256}; +use alloy_primitives::U256; +use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. 
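// Note (not part of the diff): deriving `derive_more::Error` on
// `ConsensusError` above replaces the hand-written, `std`-gated
// `std::error::Error` impl while keeping the same surface for callers, e.g.
// (hypothetical helper; `core::error::Error` fits the crate's no_std support
// and is stable as of the Rust 1.81 toolchain this PR moves to):
//
//   fn root_cause(err: &ConsensusError) -> Option<&(dyn core::error::Error + 'static)> {
//       core::error::Error::source(err)
//   }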
#[derive(Debug, Copy, Clone, Default)] diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 14f565a69d..4369472091 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; +use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader, U256}; +use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 1111d44d70..36f525533a 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -32,6 +32,9 @@ reth-rpc-types-compat.workspace = true jsonrpsee-types.workspace = true jsonrpsee.workspace = true +# ethereum +alloy-primitives.workspace = true + futures-util.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 728e5764ed..1b0ff9b54e 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,4 +1,5 @@ use crate::traits::PayloadEnvelopeExt; +use alloy_primitives::B256; use jsonrpsee::{ core::client::ClientT, http_client::{transport::HttpBackend, HttpClient}, @@ -12,7 +13,6 @@ use reth::{ }, }; use reth_payload_builder::PayloadId; -use reth_primitives::B256; use reth_rpc_layer::AuthClientService; use std::marker::PhantomData; diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 25fb77f01e..61c79968d0 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -8,14 +8,18 @@ use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, - rpc::api::eth::{helpers::AddDevSigners, FullEthApiServer}, + rpc::{ + api::eth::{helpers::AddDevSigners, FullEthApiServer}, + types::AnyTransactionReceipt, + }, tasks::TaskManager, }; use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::EthApiBuilderProvider, FullNodeTypesAdapter, Node, - NodeAdapter, NodeAddOns, NodeComponents, NodeTypes, RethFullAdapter, + NodeAdapter, NodeAddOns, NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, + RethFullAdapter, }; use reth_provider::providers::BlockchainProvider; use tracing::{span, Level}; @@ -51,7 +55,7 @@ pub async fn setup( is_dev: bool, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypes, + N: Default + Node> + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, @@ -61,6 +65,7 @@ where EthApi: FullEthApiServer< NetworkTypes: Network< TransactionResponse = reth_rpc_types::WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + AddDevSigners + EthApiBuilderProvider>, @@ -116,7 +121,10 @@ where // Type aliases type TmpDB = Arc>; -type TmpNodeAdapter = FullNodeTypesAdapter>; +type TmpNodeAdapter = FullNodeTypesAdapter< + NodeTypesWithDBAdapter, + BlockchainProvider>, +>; type Adapter = NodeAdapter< RethFullAdapter, diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 0040801fa3..93ba81d677 100644 
--- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, pin::Pin}; use alloy_network::Network; +use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; @@ -8,15 +9,14 @@ use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, network::PeersHandleProvider, - payload::PayloadTypes, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - types::engine::PayloadStatusEnum, + types::{engine::PayloadStatusEnum, AnyTransactionReceipt}, }, }; -use reth_node_builder::{EthApiTypes, NodeAddOns, NodeTypes}; -use reth_primitives::{BlockHash, BlockNumber, Bytes, B256}; +use reth_chainspec::ChainSpec; +use reth_node_builder::{EthApiTypes, NodeAddOns, NodeTypesWithEngine}; use reth_rpc_types::WithOtherFields; use reth_stages_types::StageId; use tokio_stream::StreamExt; @@ -36,18 +36,20 @@ where /// The core structure representing the full node. pub inner: FullNode, /// Context for testing payload-related features. - pub payload: PayloadTestContext, + pub payload: PayloadTestContext<::Engine>, /// Context for testing network functionalities. pub network: NetworkTestContext, /// Context for testing the Engine API. - pub engine_api: EngineApiTestContext, + pub engine_api: EngineApiTestContext<::Engine>, /// Context for testing RPC features. pub rpc: RpcTestContext, } -impl NodeTestContext +impl NodeTestContext where + Engine: EngineTypes, Node: FullNodeComponents, + Node::Types: NodeTypesWithEngine, Node::Network: PeersHandleProvider, AddOns: NodeAddOns, { @@ -62,7 +64,7 @@ where engine_api: EngineApiTestContext { engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), - _marker: PhantomData::, + _marker: PhantomData::, }, rpc: RpcTestContext { inner: node.rpc_registry }, }) @@ -82,20 +84,16 @@ where &mut self, length: u64, tx_generator: impl Fn(u64) -> Pin>>, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes - + Copy, - ) -> eyre::Result< - Vec<( - ::BuiltPayload, - ::PayloadBuilderAttributes, - )>, - > + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + Copy, + ) -> eyre::Result> where - ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + ::ExecutionPayloadV3: + From + PayloadEnvelopeExt, AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt, - ::NetworkTypes: - Network>, + ::NetworkTypes: Network< + TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, + >, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { @@ -116,14 +114,11 @@ where /// It triggers the resolve payload via engine api and expects the built payload event. 
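Most of the churn in this file comes from replacing `<<Node::Types as NodeTypesWithEngine>::Engine as PayloadTypes>::...` projections with a direct `Engine` type parameter on `NodeTestContext`. A dependency-free sketch (illustrative names, not the reth API) of why that shortens every signature:

```rust
trait EngineTypes {
    type BuiltPayload: Default;
    type PayloadBuilderAttributes;
}

struct NodeTestContext<Engine>(std::marker::PhantomData<Engine>);

impl<Engine: EngineTypes> NodeTestContext<Engine> {
    /// With `Engine` in scope, the return type is written directly,
    /// instead of being re-projected through the node's associated types.
    fn new_payload(
        &mut self,
        attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes,
    ) -> (Engine::BuiltPayload, Engine::PayloadBuilderAttributes) {
        (<Engine::BuiltPayload as Default>::default(), attributes_generator(0))
    }
}

struct EthEngine;
impl EngineTypes for EthEngine {
    type BuiltPayload = Vec<u8>;
    type PayloadBuilderAttributes = u64;
}

fn main() {
    let mut ctx = NodeTestContext::<EthEngine>(std::marker::PhantomData);
    let (_payload, attrs) = ctx.new_payload(|n| n + 1);
    assert_eq!(attrs, 1);
}
```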
pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, - ) -> eyre::Result<( - <::Engine as PayloadTypes>::BuiltPayload, - <::Engine as PayloadTypes>::PayloadBuilderAttributes, - )> + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, + ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + ::ExecutionPayloadV3: + From + PayloadEnvelopeExt, { // trigger new payload building draining the pool let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); @@ -141,14 +136,11 @@ where pub async fn advance_block( &mut self, versioned_hashes: Vec, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, - ) -> eyre::Result<( - ::BuiltPayload, - <::Engine as PayloadTypes>::PayloadBuilderAttributes, - )> + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, + ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + ::ExecutionPayloadV3: + From + PayloadEnvelopeExt, { let (payload, eth_attr) = self.new_payload(attributes_generator).await?; diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index a7d548a931..bcac7dcfed 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,5 +1,6 @@ use alloy_consensus::TxEnvelope; use alloy_network::{eip2718::Decodable2718, Network}; +use alloy_primitives::{Bytes, B256}; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, rpc::api::{ @@ -7,9 +8,9 @@ use reth::{ DebugApiServer, }, }; -use reth_node_builder::EthApiTypes; -use reth_primitives::{Bytes, B256}; -use reth_rpc_types::WithOtherFields; +use reth_chainspec::ChainSpec; +use reth_node_builder::{EthApiTypes, NodeTypes}; +use reth_rpc_types::{AnyTransactionReceipt, WithOtherFields}; #[allow(missing_debug_implementations)] pub struct RpcTestContext { @@ -18,11 +19,12 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: FullNodeComponents, + Node: FullNodeComponents>, EthApi: EthApiSpec + EthTransactions< NetworkTypes: Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + TraceExt, { diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 041ba40d6e..329454e6af 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,12 +1,11 @@ -use alloy_consensus::{ - BlobTransactionSidecar, EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, - TxEnvelope, +use alloy_consensus::{EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope}; +use alloy_network::{ + eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; -use alloy_network::{eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder}; +use alloy_primitives::{hex, Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types::{TransactionInput, TransactionRequest}; use alloy_signer_local::PrivateKeySigner; use eyre::Ok; -use reth_primitives::{hex, Address, Bytes, TxKind, B256, U256}; /// Helper for transaction operations #[derive(Debug)] @@ -34,13 +33,8 @@ impl TransactionTestContext { let mut builder = SidecarBuilder::::new(); builder.ingest(b"dummy blob"); - let sidecar: BlobTransactionSidecar = builder.build()?; - - >::set_blob_sidecar(&mut tx, sidecar); - 
>::set_max_fee_per_blob_gas( - &mut tx, - 15e9 as u128, - ); + tx.set_blob_sidecar(builder.build()?); + tx.set_max_fee_per_blob_gas(15e9 as u128); let signed = Self::sign_tx(wallet, tx).await; Ok(signed) diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml new file mode 100644 index 0000000000..b0eaefce07 --- /dev/null +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "reth-invalid-block-hooks" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-engine-primitives.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } +reth-tracing.workspace = true +reth-trie = { workspace = true, features = ["serde"] } + +# alloy +alloy-rlp.workspace = true +alloy-rpc-types-debug.workspace = true + +# async +futures.workspace = true + +# misc +eyre.workspace = true +jsonrpsee.workspace = true +pretty_assertions = "1.4" +serde_json.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/lib.rs b/crates/engine/invalid-block-hooks/src/lib.rs new file mode 100644 index 0000000000..26e208da64 --- /dev/null +++ b/crates/engine/invalid-block-hooks/src/lib.rs @@ -0,0 +1,5 @@ +//! Invalid block hook implementations. + +mod witness; + +pub use witness::InvalidBlockWitnessHook; diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs new file mode 100644 index 0000000000..6ce5513db1 --- /dev/null +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -0,0 +1,254 @@ +use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; + +use alloy_rpc_types_debug::ExecutionWitness; +use eyre::OptionExt; +use pretty_assertions::Comparison; +use reth_chainspec::ChainSpec; +use reth_engine_primitives::InvalidBlockHook; +use reth_evm::{ + system_calls::{apply_beacon_root_contract_call, apply_blockhashes_contract_call}, + ConfigureEvm, +}; +use reth_primitives::{ + keccak256, Header, Receipt, SealedBlockWithSenders, SealedHeader, B256, U256, +}; +use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; +use reth_revm::{ + database::StateProviderDatabase, + db::states::bundle_state::BundleRetention, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, + DatabaseCommit, StateBuilder, +}; +use reth_rpc_api::DebugApiClient; +use reth_tracing::tracing::warn; +use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; + +/// Generates a witness for the given block and saves it to a file. +#[derive(Debug)] +pub struct InvalidBlockWitnessHook { + /// The provider to read the historical state and do the EVM execution. + provider: P, + /// The EVM configuration to use for the execution. + evm_config: EvmConfig, + /// The directory to write the witness to. Additionally, diff files will be written to this + /// directory in case of failed sanity checks. + output_directory: PathBuf, + /// The healthy node client to compare the witness against. + healthy_node_client: Option, +} + +impl InvalidBlockWitnessHook { + /// Creates a new witness hook. 
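For context on what the witness hook below is building toward: after re-executing the block, it keccak-hashes every touched account and storage slot for the witness, and records the preimage of each hash so the consumer can map hashed keys back to plain ones. A hypothetical, dependency-free sketch of that bookkeeping, with a stand-in hash function in place of the real `keccak256`:

```rust
use std::collections::HashMap;

fn keccak256(data: &[u8]) -> [u8; 32] {
    // Stand-in hash for the sketch; reth uses the real keccak256.
    let mut out = [0u8; 32];
    for (i, b) in data.iter().enumerate() {
        out[i % 32] ^= *b;
    }
    out
}

fn main() {
    // (address, touched storage slots) pairs gathered during re-execution.
    let touched_accounts: Vec<([u8; 20], Vec<[u8; 32]>)> = vec![
        ([0x11; 20], vec![[0x01; 32], [0x02; 32]]),
        ([0x22; 20], vec![]),
    ];

    // hashed key -> preimage bytes (plain encoding here, RLP in the diff).
    let mut state_preimages: HashMap<[u8; 32], Vec<u8>> = HashMap::new();

    for (address, slots) in &touched_accounts {
        let hashed_address = keccak256(address);
        state_preimages.insert(hashed_address, address.to_vec());
        for slot in slots {
            let hashed_slot = keccak256(slot);
            state_preimages.insert(hashed_slot, slot.to_vec());
        }
    }

    // Two addresses plus two slots were recorded.
    assert_eq!(state_preimages.len(), 4);
}
```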
+ pub const fn new( + provider: P, + evm_config: EvmConfig, + output_directory: PathBuf, + healthy_node_client: Option, + ) -> Self { + Self { provider, evm_config, output_directory, healthy_node_client } + } +} + +impl InvalidBlockWitnessHook +where + P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, + EvmConfig: ConfigureEvm
, +{ + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) -> eyre::Result<()> { + // TODO(alexey): unify with `DebugApi::debug_execution_witness` + + // Setup database. + let mut db = StateBuilder::new() + .with_database(StateProviderDatabase::new( + self.provider.state_by_block_hash(parent_header.hash())?, + )) + .with_bundle_update() + .build(); + + // Setup environment for the execution. + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, block.header(), U256::MAX); + + // Setup EVM + let mut evm = self.evm_config.evm_with_env( + &mut db, + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()), + ); + + // Apply pre-block system contract calls. + apply_beacon_root_contract_call( + &self.evm_config, + &self.provider.chain_spec(), + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + apply_blockhashes_contract_call( + &self.evm_config, + &self.provider.chain_spec(), + block.timestamp, + block.number, + block.parent_hash, + &mut evm, + )?; + + // Re-execute all of the transactions in the block to load all touched accounts into + // the cache DB. + for tx in block.transactions() { + self.evm_config.fill_tx_env( + evm.tx_mut(), + tx, + tx.recover_signer().ok_or_eyre("failed to recover sender")?, + ); + let result = evm.transact()?; + evm.db_mut().commit(result.state); + } + + drop(evm); + + // Merge all state transitions + db.merge_transitions(BundleRetention::Reverts); + + // Take the bundle state + let bundle_state = db.take_bundle(); + + // Initialize a map of preimages. + let mut state_preimages = HashMap::new(); + + // Grab all account proofs for the data accessed during block execution. + // + // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes + // referenced accounts + storage slots. + let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + for (address, account) in db.cache.accounts { + let hashed_address = keccak256(address); + hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + if let Some(account) = account.account { + state_preimages.insert(hashed_address, alloy_rlp::encode(address).into()); + + for (slot, value) in account.storage { + let slot = B256::from(slot); + let hashed_slot = keccak256(slot); + storage.storage.insert(hashed_slot, value); + + state_preimages.insert(hashed_slot, alloy_rlp::encode(slot).into()); + } + } + } + + // Generate an execution witness for the aggregated state of accessed accounts. + // Destruct the cache database to retrieve the state provider. + let state_provider = db.database.into_inner(); + let state = state_provider.witness(Default::default(), hashed_state.clone())?; + + // Write the witness to the output directory. + let response = ExecutionWitness { state, keys: Some(state_preimages) }; + File::create_new(self.output_directory.join(format!( + "{}_{}.json", + block.number, + block.hash() + )))? + .write_all(serde_json::to_string(&response)?.as_bytes())?; + + // The bundle state after re-execution should match the original one. 
+ if bundle_state != output.state { + let filename = format!("{}_{}.bundle_state.diff", block.number, block.hash()); + let path = self.save_diff(filename, &bundle_state, &output.state)?; + warn!(target: "engine::invalid_block_hooks::witness", path = %path.display(), "Bundle state mismatch after re-execution"); + } + + // Calculate the state root and trie updates after re-execution. They should match + // the original ones. + let (state_root, trie_output) = state_provider.state_root_with_updates(hashed_state)?; + if let Some(trie_updates) = trie_updates { + if state_root != trie_updates.1 { + let filename = format!("{}_{}.state_root.diff", block.number, block.hash()); + let path = self.save_diff(filename, &state_root, &trie_updates.1)?; + warn!(target: "engine::invalid_block_hooks::witness", path = %path.display(), "State root mismatch after re-execution"); + } + + if &trie_output != trie_updates.0 { + let filename = format!("{}_{}.trie_updates.diff", block.number, block.hash()); + let path = self.save_diff(filename, &trie_output, trie_updates.0)?; + warn!(target: "engine::invalid_block_hooks::witness", path = %path.display(), "Trie updates mismatch after re-execution"); + } + } + + if let Some(healthy_node_client) = &self.healthy_node_client { + // Compare the witness against the healthy node. + let healthy_node_witness = futures::executor::block_on(async move { + DebugApiClient::debug_execution_witness( + healthy_node_client, + block.number.into(), + true, + ) + .await + })?; + + // Write the healthy node witness to the output directory. + File::create_new(self.output_directory.join(format!( + "{}_{}.healthy_witness.json", + block.number, + block.hash() + )))? + .write_all(serde_json::to_string(&healthy_node_witness)?.as_bytes())?; + + // If the witnesses are different, write the diff to the output directory. + if response != healthy_node_witness { + let filename = format!("{}_{}.healthy_witness.diff", block.number, block.hash()); + let path = self.save_diff(filename, &response, &healthy_node_witness)?; + warn!(target: "engine::invalid_block_hooks::witness", path = %path.display(), "Witness mismatch against healthy node"); + } + } + + Ok(()) + } + + /// Saves the diff of two values into a file with the given name in the output directory. + fn save_diff( + &self, + filename: String, + original: &T, + new: &T, + ) -> eyre::Result { + let path = self.output_directory.join(filename); + let diff = Comparison::new(original, new); + File::create_new(&path)?.write_all(diff.to_string().as_bytes())?; + + Ok(path) + } +} + +impl InvalidBlockHook for InvalidBlockWitnessHook +where + P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, + EvmConfig: ConfigureEvm
, +{ + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) { + if let Err(err) = self.on_invalid_block(parent_header, block, output, trie_updates) { + warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook"); + } + } +} diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index b44a4a8aa4..437aac6a87 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -13,7 +13,10 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-execution-types.workspace = true reth-payload-primitives.workspace = true +reth-primitives.workspace = true +reth-trie.workspace = true # misc -serde.workspace = true \ No newline at end of file +serde.workspace = true diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs new file mode 100644 index 0000000000..9e1737dda0 --- /dev/null +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -0,0 +1,36 @@ +use reth_execution_types::BlockExecutionOutput; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader, B256}; +use reth_trie::updates::TrieUpdates; + +/// An invalid block hook. +pub trait InvalidBlockHook: Send + Sync { + /// Invoked when an invalid block is encountered. + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ); +} + +impl InvalidBlockHook for F +where + F: Fn( + &SealedHeader, + &SealedBlockWithSenders, + &BlockExecutionOutput, + Option<(&TrieUpdates, B256)>, + ) + Send + + Sync, +{ + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) { + self(parent_header, block, output, trie_updates) + } +} diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 4b0db7c0a1..284ed9f0fb 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -8,6 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod invalid_block_hook; +pub use invalid_block_hook::InvalidBlockHook; + use reth_chainspec::ChainSpec; pub use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index c5ace879db..63d5f1fb97 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -13,10 +13,7 @@ workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-chainspec.workspace = true reth-consensus.workspace = true -reth-db-api.workspace = true -reth-engine-primitives.workspace = true reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true @@ -26,6 +23,7 @@ reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true +reth-node-types.workspace = true # async futures.workspace = true @@ -35,14 +33,12 @@ pin-project.workspace = true thiserror.workspace = true [dev-dependencies] -reth-blockchain-tree.workspace = true -reth-consensus.workspace = true reth-engine-tree = { workspace = true, features = 
["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-evm-ethereum.workspace = true reth-exex-types.workspace = true reth-primitives.workspace = true -reth-prune-types.workspace = true +reth-chainspec.workspace = true tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index fd4cf26a95..1c0273aaa0 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -1,16 +1,13 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; -use reth_chainspec::ChainSpec; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; use reth_consensus::Consensus; -use reth_db_api::database::Database; -use reth_engine_primitives::EngineTypes; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiRequest, EngineApiRequestHandler, EngineHandler}, persistence::PersistenceHandle, - tree::{EngineApiTreeHandler, TreeConfig}, + tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ chain::{ChainEvent, ChainOrchestrator}, @@ -18,11 +15,12 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::BlockClient; +use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; -use reth_prune::Pruner; -use reth_stages_api::Pipeline; +use reth_prune::PrunerWithFactory; +use reth_stages_api::{MetricEventsSender, Pipeline}; use reth_tasks::TaskSpawner; use std::{ marker::PhantomData, @@ -35,55 +33,56 @@ use std::{ type EngineMessageStream = Pin> + Send + Sync>>; /// Alias for chain orchestrator. -type EngineServiceType = ChainOrchestrator< +type EngineServiceType = ChainOrchestrator< EngineHandler< - EngineApiRequestHandler>, - EngineMessageStream, + EngineApiRequestHandler::Engine>>, + EngineMessageStream<::Engine>, BasicBlockDownloader, >, - PipelineSync, + PipelineSync, >; /// The type that drives the chain forward and communicates progress. #[pin_project] #[allow(missing_debug_implementations)] -pub struct EngineService +pub struct EngineService where - DB: Database + 'static, + N: EngineNodeTypes, Client: BlockClient + 'static, E: BlockExecutorProvider + 'static, - T: EngineTypes, { - orchestrator: EngineServiceType, + orchestrator: EngineServiceType, _marker: PhantomData, } -impl EngineService +impl EngineService where - DB: Database + 'static, + N: EngineNodeTypes, Client: BlockClient + 'static, E: BlockExecutorProvider + 'static, - T: EngineTypes + 'static, { /// Constructor for `EngineService`. 
#[allow(clippy::too_many_arguments)] pub fn new( consensus: Arc, executor_factory: E, - chain_spec: Arc, + chain_spec: Arc, client: Client, - incoming_requests: EngineMessageStream, - pipeline: Pipeline, + incoming_requests: EngineMessageStream, + pipeline: Pipeline, pipeline_task_spawner: Box, - provider: ProviderFactory, - blockchain_db: BlockchainProvider2, - pruner: Pruner>, - payload_builder: PayloadBuilderHandle, + provider: ProviderFactory, + blockchain_db: BlockchainProvider2, + pruner: PrunerWithFactory>, + payload_builder: PayloadBuilderHandle, tree_config: TreeConfig, + invalid_block_hook: Box, + sync_metrics_tx: MetricEventsSender, ) -> Self { let downloader = BasicBlockDownloader::new(client, consensus.clone()); - let persistence_handle = PersistenceHandle::spawn_service(provider, pruner); + let persistence_handle = + PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); let payload_validator = ExecutionPayloadValidator::new(chain_spec); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); @@ -97,6 +96,7 @@ where payload_builder, canonical_in_memory_state, tree_config, + invalid_block_hook, ); let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); @@ -111,17 +111,16 @@ where } /// Returns a mutable reference to the orchestrator. - pub fn orchestrator_mut(&mut self) -> &mut EngineServiceType { + pub fn orchestrator_mut(&mut self) -> &mut EngineServiceType { &mut self.orchestrator } } -impl Stream for EngineService +impl Stream for EngineService where - DB: Database + 'static, + N: EngineNodeTypes, Client: BlockClient + 'static, E: BlockExecutorProvider + 'static, - T: EngineTypes + 'static, { type Item = ChainEvent; @@ -141,13 +140,16 @@ mod tests { use super::*; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_engine_tree::test_utils::TestPipelineBuilder; + use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; - use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; + use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, StaticFileProviderFactory, + }; + use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; use std::sync::Arc; use tokio::sync::{mpsc::unbounded_channel, watch}; @@ -179,7 +181,7 @@ mod tests { .unwrap(); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); - let pruner = Pruner::<_, ProviderFactory<_>>::new( + let pruner = Pruner::new_with_factory( provider_factory.clone(), vec![], 0, @@ -187,8 +189,10 @@ mod tests { None, rx, 0, + Some(provider_factory.static_file_provider().path().to_path_buf()), ); + let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); let (tx, _rx) = unbounded_channel(); let _eth_service = EngineService::new( consensus, @@ -203,6 +207,8 @@ mod tests { pruner, PayloadBuilderHandle::new(tx), TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + sync_metrics_tx, ); } } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index edc8fa2c2f..486f103c81 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -17,8 +17,6 @@ reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true reth-chain-state.workspace = true 
reth-consensus.workspace = true -reth-db.workspace = true -reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true @@ -33,7 +31,9 @@ reth-revm.workspace = true reth-rpc-types.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true +reth-node-types.workspace = true reth-trie.workspace = true +reth-trie-parallel.workspace = true # common futures.workspace = true @@ -48,16 +48,15 @@ reth-metrics = { workspace = true, features = ["common"] } tracing.workspace = true # optional deps for test-utils -reth-chainspec = { workspace = true, optional = true } reth-prune-types = { workspace = true, optional = true } reth-stages = { workspace = true, optional = true } reth-static-file = { workspace = true, optional = true } reth-tracing = { workspace = true, optional = true } +reth-chainspec = { workspace = true, optional = true } [dev-dependencies] # reth reth-db = { workspace = true, features = ["test-utils"] } -reth-chainspec.workspace = true reth-chain-state = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } @@ -69,6 +68,7 @@ reth-rpc-types-compat.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true reth-tracing.workspace = true +reth-chainspec.workspace = true alloy-rlp.workspace = true @@ -79,12 +79,12 @@ rand.workspace = true test-utils = [ "reth-db/test-utils", "reth-chain-state/test-utils", - "reth-chainspec", "reth-network-p2p/test-utils", "reth-prune-types", "reth-stages/test-utils", "reth-static-file", "reth-tracing", + "reth-chainspec" ] bsc = [] \ No newline at end of file diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 5c4e56d291..9aee45f83b 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -8,7 +8,8 @@ //! These modes are mutually exclusive and the node can only be in one mode at a time. use futures::FutureExt; -use reth_db_api::database::Database; +use reth_node_types::NodeTypesWithDB; +use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; use std::task::{ready, Context, Poll}; @@ -78,25 +79,19 @@ pub enum BackfillEvent { /// Pipeline sync. #[derive(Debug)] -pub struct PipelineSync -where - DB: Database, -{ +pub struct PipelineSync { /// The type that can spawn the pipeline task. pipeline_task_spawner: Box, /// The current state of the pipeline. /// The pipeline is used for large ranges. - pipeline_state: PipelineState, + pipeline_state: PipelineState, /// Pending target block for the pipeline to sync pending_pipeline_target: Option, } -impl PipelineSync -where - DB: Database + 'static, -{ +impl PipelineSync { /// Create a new instance. 
- pub fn new(pipeline: Pipeline, pipeline_task_spawner: Box) -> Self { + pub fn new(pipeline: Pipeline, pipeline_task_spawner: Box) -> Self { Self { pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), @@ -183,10 +178,7 @@ where } } -impl BackfillSync for PipelineSync -where - DB: Database + 'static, -{ +impl BackfillSync for PipelineSync { fn on_action(&mut self, event: BackfillAction) { match event { BackfillAction::Start(target) => self.set_pipeline_sync_target(target), @@ -221,14 +213,14 @@ where /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. #[derive(Debug)] -enum PipelineState { +enum PipelineState { /// Pipeline is idle. - Idle(Option>), + Idle(Option>), /// Pipeline is running and waiting for a response - Running(oneshot::Receiver>), + Running(oneshot::Receiver>), } -impl PipelineState { +impl PipelineState { /// Returns `true` if the state matches idle. const fn is_idle(&self) -> bool { matches!(self, Self::Idle(_)) @@ -242,16 +234,16 @@ mod tests { use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockNumber, Header, B256}; + use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, future::poll_fn, sync::Arc}; struct TestHarness { - pipeline_sync: PipelineSync>>, + pipeline_sync: PipelineSync, tip: B256, } diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index d85076715f..ca0034bc52 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -163,8 +163,9 @@ where /// Sets the metrics for the active downloads fn update_block_download_metrics(&self) { - self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64); - // TODO: full block range metrics + let blocks = self.inflight_full_block_requests.len() + + self.inflight_block_range_requests.iter().map(|r| r.count() as usize).sum::(); + self.metrics.active_block_downloads.set(blocks as f64); } /// Adds a pending event to the FIFO queue. 
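The download.rs metrics fix above stops ignoring range requests and counts them alongside single full-block requests. The same arithmetic in isolation (types here are hypothetical):

```rust
struct RangeRequest {
    /// Number of blocks covered by this range request.
    count: u64,
}

fn active_block_downloads(full_block_requests: usize, range_requests: &[RangeRequest]) -> usize {
    full_block_requests + range_requests.iter().map(|r| r.count as usize).sum::<usize>()
}

fn main() {
    let ranges = [RangeRequest { count: 16 }, RangeRequest { count: 4 }];
    // 3 single-block downloads plus 20 blocks requested via ranges.
    assert_eq!(active_block_downloads(3, &ranges), 23);
}
```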
diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 8fa927d8d0..d2542e1229 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -12,6 +12,7 @@ use reth_engine_primitives::EngineTypes; use reth_primitives::{SealedBlockWithSenders, B256}; use std::{ collections::HashSet, + fmt::Display, sync::mpsc::Sender, task::{ready, Context, Poll}, }; @@ -228,6 +229,17 @@ pub enum EngineApiRequest { InsertExecutedBlock(ExecutedBlock), } +impl Display for EngineApiRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Beacon(msg) => msg.fmt(f), + Self::InsertExecutedBlock(block) => { + write!(f, "InsertExecutedBlock({:?})", block.block().num_hash()) + } + } + } +} + impl From> for EngineApiRequest { fn from(msg: BeaconEngineMessage) -> Self { Self::Beacon(msg) @@ -276,6 +288,18 @@ pub enum FromEngine { DownloadedBlocks(Vec), } +impl Display for FromEngine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Event(ev) => write!(f, "Event({ev:?})"), + Self::Request(req) => write!(f, "Request({req})"), + Self::DownloadedBlocks(blocks) => { + write!(f, "DownloadedBlocks({} blocks)", blocks.len()) + } + } + } +} + impl From for FromEngine { fn from(event: FromOrchestrator) -> Self { Self::Event(event) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index fa5ca10ac7..b1ade62098 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,12 +1,13 @@ -#![allow(dead_code)] - use crate::metrics::PersistenceMetrics; use reth_chain_state::ExecutedBlock; -use reth_db::Database; use reth_errors::ProviderError; -use reth_primitives::B256; -use reth_provider::{writer::UnifiedStorageWriter, ProviderFactory, StaticFileProviderFactory}; -use reth_prune::{Pruner, PrunerError, PrunerOutput}; +use reth_primitives::BlockNumHash; +use reth_provider::{ + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ProviderFactory, + StaticFileProviderFactory, +}; +use reth_prune::{PrunerError, PrunerOutput, PrunerWithFactory}; +use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ sync::mpsc::{Receiver, SendError, Sender}, time::Instant, @@ -23,31 +24,34 @@ use tracing::{debug, error}; /// This should be spawned in its own thread with [`std::thread::spawn`], since this performs /// blocking I/O operations in an endless loop. #[derive(Debug)] -pub struct PersistenceService { +pub struct PersistenceService { /// The provider factory to use - provider: ProviderFactory, + provider: ProviderFactory, /// Incoming requests incoming: Receiver, /// The pruner - pruner: Pruner>, + pruner: PrunerWithFactory>, /// metrics metrics: PersistenceMetrics, + /// Sender for sync metrics - we only submit sync metrics for persisted blocks + sync_metrics_tx: MetricEventsSender, } -impl PersistenceService { +impl PersistenceService { /// Create a new persistence service pub fn new( - provider: ProviderFactory, + provider: ProviderFactory, incoming: Receiver, - pruner: Pruner>, + pruner: PrunerWithFactory>, + sync_metrics_tx: MetricEventsSender, ) -> Self { - Self { provider, incoming, pruner, metrics: PersistenceMetrics::default() } + Self { provider, incoming, pruner, metrics: PersistenceMetrics::default(), sync_metrics_tx } } /// Prunes block data before the given block hash according to the configured prune /// configuration. 
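The new `Display` impls in engine.rs above exist so tracing logs can print a compact summary of a message instead of a full `Debug` dump of every downloaded block. A self-contained sketch of the same pattern, with illustrative variants:

```rust
use std::fmt::{self, Display};

enum FromEngine {
    Event(String),
    DownloadedBlocks(Vec<u64>),
}

impl Display for FromEngine {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Event(ev) => write!(f, "Event({ev:?})"),
            Self::DownloadedBlocks(blocks) => {
                // Summarize instead of dumping every block body.
                write!(f, "DownloadedBlocks({} blocks)", blocks.len())
            }
        }
    }
}

fn main() {
    let msg = FromEngine::DownloadedBlocks(vec![1, 2, 3]);
    assert_eq!(msg.to_string(), "DownloadedBlocks(3 blocks)");
}
```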
fn prune_before(&mut self, block_num: u64) -> Result { - debug!(target: "tree::persistence", ?block_num, "Running pruner"); + debug!(target: "engine::persistence", ?block_num, "Running pruner"); let start_time = Instant::now(); // TODO: doing this properly depends on pruner segment changes let result = self.pruner.run(block_num); @@ -56,10 +60,7 @@ impl PersistenceService { } } -impl PersistenceService -where - DB: Database, -{ +impl PersistenceService { /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -67,12 +68,21 @@ where while let Ok(action) = self.incoming.recv() { match action { PersistenceAction::RemoveBlocksAbove(new_tip_num, sender) => { - self.on_remove_blocks_above(new_tip_num)?; + let result = self.on_remove_blocks_above(new_tip_num)?; + // send new sync metrics based on removed blocks + let _ = + self.sync_metrics_tx.send(MetricEvent::SyncHeight { height: new_tip_num }); // we ignore the error because the caller may or may not care about the result - let _ = sender.send(()); + let _ = sender.send(result); } PersistenceAction::SaveBlocks(blocks, sender) => { let result = self.on_save_blocks(blocks)?; + if let Some(ref num_hash) = result { + // send new sync metrics based on saved blocks + let _ = self + .sync_metrics_tx + .send(MetricEvent::SyncHeight { height: num_hash.number }); + } // we ignore the error because the caller may or may not care about the result let _ = sender.send(result); } @@ -87,23 +97,35 @@ where Ok(()) } - fn on_remove_blocks_above(&self, new_tip_num: u64) -> Result<(), PersistenceError> { + fn on_remove_blocks_above( + &self, + new_tip_num: u64, + ) -> Result, PersistenceError> { + debug!(target: "engine::persistence", ?new_tip_num, "Removing blocks"); let start_time = Instant::now(); let provider_rw = self.provider.provider_rw()?; let sf_provider = self.provider.static_file_provider(); + let new_tip_hash = provider_rw.block_hash(new_tip_num)?; UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?; UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)?; + debug!(target: "engine::persistence", ?new_tip_num, ?new_tip_hash, "Removed blocks from disk"); self.metrics.remove_blocks_above_duration_seconds.record(start_time.elapsed()); - Ok(()) + Ok(new_tip_hash.map(|hash| BlockNumHash { hash, number: new_tip_num })) } - fn on_save_blocks(&self, blocks: Vec) -> Result, PersistenceError> { + fn on_save_blocks( + &self, + blocks: Vec, + ) -> Result, PersistenceError> { + debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); - let last_block_hash = blocks.last().map(|block| block.block().hash()); + let last_block_hash_num = blocks + .last() + .map(|block| BlockNumHash { hash: block.block().hash(), number: block.block().number }); - if last_block_hash.is_some() { + if last_block_hash_num.is_some() { let provider_rw = self.provider.provider_rw()?; let static_file_provider = self.provider.static_file_provider(); @@ -111,7 +133,7 @@ where UnifiedStorageWriter::commit(provider_rw, static_file_provider)?; } self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); - Ok(last_block_hash) + Ok(last_block_hash_num) } } @@ -135,13 +157,13 @@ pub enum PersistenceAction { /// /// First, header, transaction, and receipt-related data should be written to static 
files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec, oneshot::Sender>), + SaveBlocks(Vec, oneshot::Sender>), /// Removes block data above the given block number from the database. /// /// This will first update checkpoints from the database, then remove actual block data from /// static files. - RemoveBlocksAbove(u64, oneshot::Sender<()>), + RemoveBlocksAbove(u64, oneshot::Sender>), /// Prune associated block data before the given block number, according to already-configured /// prune modes. @@ -162,9 +184,10 @@ impl PersistenceHandle { } /// Create a new [`PersistenceHandle`], and spawn the persistence service. - pub fn spawn_service( - provider_factory: ProviderFactory, - pruner: Pruner>, + pub fn spawn_service( + provider_factory: ProviderFactory, + pruner: PrunerWithFactory>, + sync_metrics_tx: MetricEventsSender, ) -> Self { // create the initial channels let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); @@ -173,7 +196,8 @@ impl PersistenceHandle { let persistence_handle = Self::new(db_service_tx); // spawn the persistence service - let db_service = PersistenceService::new(provider_factory, db_service_rx, pruner); + let db_service = + PersistenceService::new(provider_factory, db_service_rx, pruner, sync_metrics_tx); std::thread::Builder::new() .name("Persistence Service".to_string()) .spawn(|| { @@ -206,7 +230,7 @@ impl PersistenceHandle { pub fn save_blocks( &self, blocks: Vec, - tx: oneshot::Sender>, + tx: oneshot::Sender>, ) -> Result<(), SendError> { self.send_action(PersistenceAction::SaveBlocks(blocks, tx)) } @@ -214,11 +238,12 @@ impl PersistenceHandle { /// Tells the persistence service to remove blocks above a certain block number. The removed /// blocks are returned by the service. /// - /// When the operation completes, `()` is returned in the receiver end of the sender argument. + /// When the operation completes, the new tip hash is returned in the receiver end of the sender + /// argument. 
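The changed `PersistenceAction` signatures all follow one request/response shape: the caller packages a oneshot-style sender with the action and waits for an `Option` of the new tip on the other end. A reduced sketch of that handshake, using std `mpsc` channels as stand-ins for the tokio oneshot used in the diff:

```rust
use std::{sync::mpsc, thread};

struct BlockNumHash {
    number: u64,
    hash: [u8; 32],
}

enum PersistenceAction {
    RemoveBlocksAbove(u64, mpsc::Sender<Option<BlockNumHash>>),
}

fn main() {
    let (action_tx, action_rx) = mpsc::channel::<PersistenceAction>();

    // The persistence service loop, reduced to a single action kind.
    let service = thread::spawn(move || {
        while let Ok(action) = action_rx.recv() {
            match action {
                PersistenceAction::RemoveBlocksAbove(new_tip_num, sender) => {
                    // Pretend we unwound the database and looked up the hash.
                    let result =
                        Some(BlockNumHash { number: new_tip_num, hash: [0xab; 32] });
                    // The caller may have gone away; ignore send errors.
                    let _ = sender.send(result);
                }
            }
        }
    });

    let (tx, rx) = mpsc::channel();
    action_tx.send(PersistenceAction::RemoveBlocksAbove(100, tx)).unwrap();
    let new_tip = rx.recv().unwrap().expect("no tip returned");
    assert_eq!(new_tip.number, 100);

    drop(action_tx); // close the channel so the service loop exits
    service.join().unwrap();
}
```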
pub fn remove_blocks_above( &self, block_num: u64, - tx: oneshot::Sender<()>, + tx: oneshot::Sender>, ) -> Result<(), SendError> { self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx)) } @@ -242,8 +267,9 @@ mod tests { use reth_chain_state::test_utils::TestBlockBuilder; use reth_exex_types::FinishedExExHeight; use reth_primitives::B256; - use reth_provider::{test_utils::create_test_provider_factory, ProviderFactory}; + use reth_provider::test_utils::create_test_provider_factory; use reth_prune::Pruner; + use tokio::sync::mpsc::unbounded_channel; fn default_persistence_handle() -> PersistenceHandle { let provider = create_test_provider_factory(); @@ -251,7 +277,7 @@ mod tests { let (_finished_exex_height_tx, finished_exex_height_rx) = tokio::sync::watch::channel(FinishedExExHeight::NoExExs); - let pruner = Pruner::<_, ProviderFactory<_>>::new( + let pruner = Pruner::new_with_factory( provider.clone(), vec![], 5, @@ -259,9 +285,11 @@ mod tests { None, finished_exex_height_rx, 0, + Some(provider.static_file_provider().path().to_path_buf()), ); - PersistenceHandle::spawn_service(provider, pruner) + let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); + PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx) } #[tokio::test] @@ -293,11 +321,12 @@ mod tests { persistence_handle.save_blocks(blocks, tx).unwrap(); - let actual_hash = tokio::time::timeout(std::time::Duration::from_secs(10), rx) - .await - .expect("test timed out") - .expect("channel closed unexpectedly") - .expect("no hash returned"); + let BlockNumHash { hash: actual_hash, number: _ } = + tokio::time::timeout(std::time::Duration::from_secs(10), rx) + .await + .expect("test timed out") + .expect("channel closed unexpectedly") + .expect("no hash returned"); assert_eq!(block_hash, actual_hash); } @@ -313,8 +342,7 @@ mod tests { let (tx, rx) = oneshot::channel(); persistence_handle.save_blocks(blocks, tx).unwrap(); - - let actual_hash = rx.await.unwrap().unwrap(); + let BlockNumHash { hash: actual_hash, number: _ } = rx.await.unwrap().unwrap(); assert_eq!(last_hash, actual_hash); } @@ -332,7 +360,7 @@ mod tests { persistence_handle.save_blocks(blocks, tx).unwrap(); - let actual_hash = rx.await.unwrap().unwrap(); + let BlockNumHash { hash: actual_hash, number: _ } = rx.await.unwrap().unwrap(); assert_eq!(last_hash, actual_hash); } } diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index 5ace509d11..d76fabf43f 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,8 +1,10 @@ use reth_chainspec::ChainSpec; -use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockBody, SealedHeader, B256}; -use reth_provider::{test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome}; +use reth_provider::{ + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ExecutionOutcome, +}; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_stages_api::Pipeline; @@ -40,12 +42,12 @@ impl TestPipelineBuilder { } /// Builds the pipeline. 
- pub fn build(self, chain_spec: Arc) -> Pipeline>> { + pub fn build(self, chain_spec: Arc) -> Pipeline { reth_tracing::init_test_tracing(); // Setup pipeline let (tip_tx, _tip_rx) = watch::channel(B256::default()); - let pipeline = Pipeline::builder() + let pipeline = Pipeline::::builder() .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) .with_tip_sender(tip_tx); diff --git a/crates/engine/tree/src/tree/config.rs b/crates/engine/tree/src/tree/config.rs index 600fb62b60..d252b65a8d 100644 --- a/crates/engine/tree/src/tree/config.rs +++ b/crates/engine/tree/src/tree/config.rs @@ -1,7 +1,11 @@ //! Engine tree configuration. -const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 6; -const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 6; +/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; + +/// How close to the canonical head we persist blocks. +pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 2; + const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256; const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256; @@ -15,6 +19,8 @@ pub struct TreeConfig { persistence_threshold: u64, /// How close to the canonical head we persist blocks. Represents the ideal /// number of most recent blocks to keep in memory for quick access and reorgs. + /// + /// Note: this should be less than or equal to `persistence_threshold`. memory_block_buffer_target: u64, /// Number of pending blocks that cannot be executed due to missing parent and /// are kept in cache. @@ -22,6 +28,9 @@ pub struct TreeConfig { /// Number of invalid headers to keep in cache. max_invalid_header_cache_length: u32, /// Maximum number of blocks to execute sequentially in a batch. + /// + /// This is used as a cutoff to prevent long-running sequential block execution when we receive + /// a batch of downloaded blocks. max_execute_block_batch_size: usize, } diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs new file mode 100644 index 0000000000..7e401b53c5 --- /dev/null +++ b/crates/engine/tree/src/tree/invalid_block_hook.rs @@ -0,0 +1,43 @@ +use reth_engine_primitives::InvalidBlockHook; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader, B256}; +use reth_provider::BlockExecutionOutput; +use reth_trie::updates::TrieUpdates; + +/// A no-op [`InvalidBlockHook`] that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopInvalidBlockHook; + +impl InvalidBlockHook for NoopInvalidBlockHook { + fn on_invalid_block( + &self, + _parent_header: &SealedHeader, + _block: &SealedBlockWithSenders, + _output: &BlockExecutionOutput, + _trie_updates: Option<(&TrieUpdates, B256)>, + ) { + } +} + +/// Multiple [`InvalidBlockHook`]s that are executed in order. 
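Taken together, the trait in reth-engine-primitives, the blanket impl for closures, `NoopInvalidBlockHook`, and `InvalidBlockHooks` form a small plugin system. A compilable reduction of that shape, with the block, output, and trie-update arguments collapsed to a bare block number for brevity:

```rust
trait InvalidBlockHook: Send + Sync {
    fn on_invalid_block(&self, block_number: u64);
}

// Any matching closure is a hook, mirroring the blanket `impl ... for F`.
impl<F> InvalidBlockHook for F
where
    F: Fn(u64) + Send + Sync,
{
    fn on_invalid_block(&self, block_number: u64) {
        self(block_number)
    }
}

/// Runs each inner hook in order, like `InvalidBlockHooks`.
struct InvalidBlockHooks(Vec<Box<dyn InvalidBlockHook>>);

impl InvalidBlockHook for InvalidBlockHooks {
    fn on_invalid_block(&self, block_number: u64) {
        for hook in &self.0 {
            hook.on_invalid_block(block_number);
        }
    }
}

fn main() {
    let hooks = InvalidBlockHooks(vec![
        Box::new(|n: u64| println!("witness hook saw block {n}")),
        Box::new(|n: u64| println!("metrics hook saw block {n}")),
    ]);
    hooks.on_invalid_block(42);
}
```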
+pub struct InvalidBlockHooks(pub Vec>); + +impl std::fmt::Debug for InvalidBlockHooks { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() + } +} + +impl InvalidBlockHook for InvalidBlockHooks { + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) { + for hook in &self.0 { + hook.on_invalid_block(parent_header, block, output, trie_updates); + } + } +} diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 1a1c2edf29..2df1fbdac7 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,12 +1,24 @@ +use reth_evm::metrics::ExecutorMetrics; use reth_metrics::{ metrics::{Counter, Gauge, Histogram}, Metrics, }; +/// Metrics for the `EngineApi`. +#[derive(Debug, Default)] +pub(crate) struct EngineApiMetrics { + /// Engine API-specific metrics. + pub(crate) engine: EngineMetrics, + /// Block executor metrics. + pub(crate) executor: ExecutorMetrics, + /// Metrics for block validation + pub(crate) block_validation: BlockValidationMetrics, +} + /// Metrics for the `EngineApi`. #[derive(Metrics)] #[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineApiMetrics { +pub(crate) struct EngineMetrics { /// How many executed blocks are currently stored. pub(crate) executed_blocks: Gauge, /// The number of times the pipeline was run. @@ -19,3 +31,21 @@ pub(crate) struct EngineApiMetrics { pub(crate) persistence_duration: Histogram, // TODO add latency metrics } + +/// Metrics for non-execution related block validation. +#[derive(Metrics)] +#[metrics(scope = "sync.block_validation")] +pub(crate) struct BlockValidationMetrics { + /// Histogram of state root duration + pub(crate) state_root_histogram: Histogram, + /// Latest state root duration + pub(crate) state_root_duration: Gauge, +} + +impl BlockValidationMetrics { + /// Records a new state root time, updating both the histogram and state root gauge + pub(crate) fn record_state_root(&self, elapsed_as_secs: f64) { + self.state_root_duration.set(elapsed_as_secs); + self.state_root_histogram.record(elapsed_as_secs); + } +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 2db8ff8cbc..7cbc897467 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -27,8 +27,9 @@ use reth_primitives::{ SealedHeader, B256, U256, }; use reth_provider::{ - BlockReader, ExecutionOutcome, ProviderError, StateProviderBox, StateProviderFactory, - StateRootProvider, + providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, + ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ @@ -39,9 +40,12 @@ use reth_rpc_types::{ ExecutionPayload, }; use reth_stages_api::ControlFlow; -use reth_trie::HashedPostState; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + cmp::Ordering, + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet, VecDeque}, + fmt::Debug, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -56,10 +60,13 @@ use tokio::sync::{ }; use tracing::*; -mod 
config; +pub mod config; +mod invalid_block_hook; mod metrics; use crate::{engine::EngineApiRequest, tree::metrics::EngineApiMetrics}; pub use config::TreeConfig; +pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; +pub use reth_engine_primitives::InvalidBlockHook; /// Keeps track of the state of the tree. /// @@ -81,6 +88,10 @@ pub struct TreeState { blocks_by_number: BTreeMap>, /// Map of any parent block hash to its children. parent_to_child: HashMap>, + /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. + /// + /// Contains the block number for easy removal. + persisted_trie_updates: HashMap)>, /// Currently tracked canonical head of the chain. current_canonical_head: BlockNumHash, } @@ -93,6 +104,7 @@ impl TreeState { blocks_by_number: BTreeMap::new(), current_canonical_head, parent_to_child: HashMap::new(), + persisted_trie_updates: HashMap::new(), } } @@ -101,6 +113,11 @@ impl TreeState { self.blocks_by_hash.len() } + /// Returns the [`ExecutedBlock`] by hash. + fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { + self.blocks_by_hash.get(&hash) + } + /// Returns the block by hash. fn block_by_hash(&self, hash: B256) -> Option> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) @@ -149,63 +166,189 @@ impl TreeState { } } - /// Determines if the given block is part of a fork by checking that these - /// conditions are true: - /// * walking back from the target hash to verify that the target hash is not part of an - /// extension of the canonical chain. - /// * walking back from the current head to verify that the target hash is not already part of - /// the canonical chain. - fn is_fork(&self, target_hash: B256) -> bool { - // verify that the given hash is not part of an extension of the canon chain. - let mut current_hash = target_hash; - while let Some(current_block) = self.block_by_hash(current_hash) { - if current_block.hash() == self.canonical_block_hash() { - return false + /// Remove single executed block by its hash. + /// + /// ## Returns + /// + /// The removed block and the block hashes of its children. + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { + let executed = self.blocks_by_hash.remove(&hash)?; + + // Remove this block from collection of children of its parent block. + let parent_entry = self.parent_to_child.entry(executed.block.parent_hash); + if let hash_map::Entry::Occupied(mut entry) = parent_entry { + entry.get_mut().remove(&hash); + + if entry.get().is_empty() { + entry.remove(); } - current_hash = current_block.header.parent_hash; } - // verify that the given hash is not already part of the canon chain - current_hash = self.canonical_block_hash(); - while let Some(current_block) = self.block_by_hash(current_hash) { - if current_block.hash() == target_hash { - return false + // Remove point to children of this block. + let children = self.parent_to_child.remove(&hash).unwrap_or_default(); + + // Remove this block from `blocks_by_number`. 
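`remove_by_hash` has to keep three indices consistent: blocks by hash, blocks by number, and the parent-to-children map. A dependency-free model of that bookkeeping (the hash type reduced to `u64`, block payloads omitted):

```rust
use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet};

type Hash = u64; // stand-in for B256

struct Block {
    parent: Hash,
    number: u64,
}

#[derive(Default)]
struct TreeState {
    blocks_by_hash: HashMap<Hash, Block>,
    blocks_by_number: BTreeMap<u64, Vec<Hash>>,
    parent_to_child: HashMap<Hash, HashSet<Hash>>,
}

impl TreeState {
    fn remove_by_hash(&mut self, hash: Hash) -> Option<(Block, HashSet<Hash>)> {
        let block = self.blocks_by_hash.remove(&hash)?;

        // Unlink from the parent's child set, dropping the set when empty.
        if let hash_map::Entry::Occupied(mut entry) = self.parent_to_child.entry(block.parent) {
            entry.get_mut().remove(&hash);
            if entry.get().is_empty() {
                entry.remove();
            }
        }

        // This block's own children are handed back so the caller can
        // decide whether to cascade the removal.
        let children = self.parent_to_child.remove(&hash).unwrap_or_default();

        // Drop the block from the per-number index as well.
        if let btree_map::Entry::Occupied(mut entry) = self.blocks_by_number.entry(block.number) {
            entry.get_mut().retain(|h| *h != hash);
            if entry.get().is_empty() {
                entry.remove();
            }
        }

        Some((block, children))
    }
}

fn main() {
    let mut state = TreeState::default();
    state.blocks_by_hash.insert(2, Block { parent: 1, number: 10 });
    state.blocks_by_number.insert(10, vec![2]);
    state.parent_to_child.entry(1).or_default().insert(2);

    let (removed, children) = state.remove_by_hash(2).unwrap();
    assert_eq!(removed.number, 10);
    assert!(children.is_empty());
    assert!(state.blocks_by_number.is_empty());
}
```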
+ let block_number_entry = self.blocks_by_number.entry(executed.block.number); + if let btree_map::Entry::Occupied(mut entry) = block_number_entry { + // We have to find the index of the block since it exists in a vec + if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { + entry.get_mut().swap_remove(index); + + // If there are no blocks left then remove the entry for this block + if entry.get().is_empty() { + entry.remove(); + } } - current_hash = current_block.header.parent_hash; } - true + + Some((executed, children)) } - /// Remove all blocks up to __and including__ the given block number. - pub(crate) fn remove_before(&mut self, upper_bound: BlockNumber) { - let mut numbers_to_remove = Vec::new(); - for (&number, _) in - self.blocks_by_number.range((Bound::Unbounded, Bound::Included(upper_bound))) - { - numbers_to_remove.push(number); + /// Returns whether or not the hash is part of the canonical chain. + pub(crate) fn is_canonical(&self, hash: B256) -> bool { + let mut current_block = self.current_canonical_head.hash; + if current_block == hash { + return true } - for number in numbers_to_remove { - if let Some(blocks) = self.blocks_by_number.remove(&number) { - for block in blocks { - let block_hash = block.block.hash(); - self.blocks_by_hash.remove(&block_hash); + while let Some(executed) = self.blocks_by_hash.get(¤t_block) { + current_block = executed.block.parent_hash; + if current_block == hash { + return true + } + } - if let Some(parent_children) = - self.parent_to_child.get_mut(&block.block.parent_hash) - { - parent_children.remove(&block_hash); - if parent_children.is_empty() { - self.parent_to_child.remove(&block.block.parent_hash); - } - } + false + } - self.parent_to_child.remove(&block_hash); + /// Removes canonical blocks below the upper bound, only if the last persisted hash is + /// part of the canonical chain. + pub(crate) fn remove_canonical_until( + &mut self, + upper_bound: BlockNumber, + last_persisted_hash: B256, + ) { + debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removing canonical blocks from the tree"); + + // If the last persisted hash is not canonical, then we don't want to remove any canonical + // blocks yet. + if !self.is_canonical(last_persisted_hash) { + return + } + + // First, let's walk back the canonical chain and remove canonical blocks lower than the + // upper bound + let mut current_block = self.current_canonical_head.hash; + while let Some(executed) = self.blocks_by_hash.get(¤t_block) { + current_block = executed.block.parent_hash; + if executed.block.number <= upper_bound { + debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); + if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { + debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); + // finally, move the trie updates + self.persisted_trie_updates + .insert(removed.block.hash(), (removed.block.number, removed.trie)); } } } } + /// Removes all blocks that are below the finalized block, as well as removing non-canonical + /// sidechains that fork from below the finalized block. + pub(crate) fn prune_finalized_sidechains(&mut self, finalized_num_hash: BlockNumHash) { + let BlockNumHash { number: finalized_num, hash: finalized_hash } = finalized_num_hash; + + // We remove disconnected sidechains in three steps: + // * first, remove everything with a block number __below__ the finalized block. 
+    /// Removes all blocks that are below the finalized block, as well as removing non-canonical
+    /// sidechains that fork from below the finalized block.
+    pub(crate) fn prune_finalized_sidechains(&mut self, finalized_num_hash: BlockNumHash) {
+        let BlockNumHash { number: finalized_num, hash: finalized_hash } = finalized_num_hash;
+
+        // We remove disconnected sidechains in three steps:
+        // * first, remove everything with a block number __below__ the finalized block.
+        // * next, we populate a vec with parents __at__ the finalized block.
+        // * finally, we iterate through the vec, removing children until the vec is empty
+        //   (BFS).
+
+        // We _exclude_ the finalized block because we will be dealing with the blocks __at__
+        // the finalized block later.
+        let blocks_to_remove = self
+            .blocks_by_number
+            .range((Bound::Unbounded, Bound::Excluded(finalized_num)))
+            .flat_map(|(_, blocks)| blocks.iter().map(|b| b.block.hash()))
+            .collect::<Vec<_>>();
+        for hash in blocks_to_remove {
+            if let Some((removed, _)) = self.remove_by_hash(hash) {
+                debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain block");
+            }
+        }
+
+        // remove trie updates that are below the finalized block
+        self.persisted_trie_updates.retain(|_, (block_num, _)| *block_num < finalized_num);
+
+        // The only block that should remain at the `finalized` number now is the finalized
+        // block, if it exists.
+        //
+        // For all other blocks, we first put their children into this vec.
+        // Then, we will iterate over them, removing them and adding their children, and so on,
+        // until the vec is empty (BFS).
+        let mut blocks_to_remove = self.blocks_by_number.remove(&finalized_num).unwrap_or_default();
+
+        // re-insert the finalized hash if we removed it
+        if let Some(position) =
+            blocks_to_remove.iter().position(|b| b.block.hash() == finalized_hash)
+        {
+            let finalized_block = blocks_to_remove.swap_remove(position);
+            self.blocks_by_number.insert(finalized_num, vec![finalized_block]);
+        }
+
+        let mut blocks_to_remove =
+            blocks_to_remove.into_iter().map(|e| e.block.hash()).collect::<VecDeque<_>>();
+        while let Some(block) = blocks_to_remove.pop_front() {
+            if let Some((removed, children)) = self.remove_by_hash(block) {
+                debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain child block");
+                blocks_to_remove.extend(children);
+            }
+        }
+    }
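
// A self-contained sketch (not part of the patch) of the breadth-first
// removal loop from `prune_finalized_sidechains` above, over a toy `children`
// map. Seed the queue with the non-finalized blocks at the finalized height,
// then pop, remove, and enqueue children until the queue drains.
use std::collections::{HashMap, HashSet, VecDeque};

type Hash = u64;

fn prune_descendants_sketch(
    children: &HashMap<Hash, HashSet<Hash>>,
    seeds: Vec<Hash>,
) -> HashSet<Hash> {
    let mut removed = HashSet::new();
    let mut queue = VecDeque::from(seeds);
    while let Some(hash) = queue.pop_front() {
        // `insert` returns false for already-removed blocks, guarding cycles
        if removed.insert(hash) {
            if let Some(kids) = children.get(&hash) {
                queue.extend(kids.iter().copied());
            }
        }
    }
    removed
}

fn main() {
    // sidechain rooted at 10: 10 -> {11, 12}, 11 -> {13}
    let children = HashMap::from([(10, HashSet::from([11, 12])), (11, HashSet::from([13]))]);
    assert_eq!(prune_descendants_sketch(&children, vec![10]), HashSet::from([10, 11, 12, 13]));
}
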
+    /// Remove all blocks up to __and including__ the given block number.
+    ///
+    /// If a finalized hash is provided, the only non-canonical blocks which will be removed are
+    /// those which have a fork point at or below the finalized hash.
+    ///
+    /// Canonical blocks below the upper bound will still be removed.
+    ///
+    /// NOTE: if the finalized block is greater than the upper bound, the only blocks that will be
+    /// removed are canonical blocks and sidechains that fork below the `upper_bound`. This is the
+    /// same behavior as if the `finalized_num` were `Some(upper_bound)`.
+    pub(crate) fn remove_until(
+        &mut self,
+        upper_bound: BlockNumber,
+        last_persisted_hash: B256,
+        finalized_num_hash: Option<BlockNumHash>,
+    ) {
+        debug!(target: "engine::tree", ?upper_bound, ?finalized_num_hash, "Removing blocks from the tree");
+
+        // If the finalized number exists and is ahead of the upper bound, we instead need to
+        // ensure that the only blocks removed are canonical blocks less than the upper bound.
+        let finalized_num_hash = finalized_num_hash.map(|mut finalized| {
+            finalized.number = finalized.number.min(upper_bound);
+            debug!(target: "engine::tree", ?finalized, "Adjusted upper bound");
+            finalized
+        });
+
+        // We want to do two things:
+        // * remove canonical blocks that are persisted
+        // * remove forks whose roots are below the finalized block
+        //
+        // We can do this in two steps:
+        // * remove all canonical blocks below the upper bound
+        // * fetch the number of the finalized hash, removing any sidechains that are __below__
+        //   the finalized block
+        self.remove_canonical_until(upper_bound, last_persisted_hash);
+
+        // Now, we have removed canonical blocks (assuming the upper bound is above the finalized
+        // block) and only have sidechains below the finalized block.
+        if let Some(finalized_num_hash) = finalized_num_hash {
+            self.prune_finalized_sidechains(finalized_num_hash);
+        }
+    }
+
     /// Updates the canonical head to the given block.
     fn set_canonical_head(&mut self, new_head: BlockNumHash) {
         self.current_canonical_head = new_head;
@@ -225,75 +368,6 @@ impl TreeState {
     const fn canonical_block_number(&self) -> BlockNumber {
         self.canonical_head().number
     }
-
-    /// Returns the new chain for the given head.
-    ///
-    /// This also handles reorgs.
-    ///
-    /// Note: This does not update the tracked state and instead returns the new chain based on the
-    /// given head.
-    fn on_new_head(&self, new_head: B256) -> Option<NewCanonicalChain> {
-        let new_head_block = self.blocks_by_hash.get(&new_head)?;
-        let new_head_number = new_head_block.block.number;
-        let current_canonical_number = self.current_canonical_head.number;
-
-        let mut new_chain = vec![new_head_block.clone()];
-        let mut current_hash = new_head_block.block.parent_hash;
-        let mut current_number = new_head_number - 1;
-
-        // Walk back the new chain until we reach a block we know about
-        while current_number > current_canonical_number {
-            if let Some(block) = self.blocks_by_hash.get(&current_hash) {
-                new_chain.push(block.clone());
-                current_hash = block.block.parent_hash;
-                current_number -= 1;
-            } else {
-                return None; // We don't have the full chain
-            }
-        }
-
-        if current_hash == self.current_canonical_head.hash {
-            new_chain.reverse();
-
-            // Simple extension of the current chain
-            return Some(NewCanonicalChain::Commit { new: new_chain });
-        }
-
-        // We have a reorg. Walk back both chains to find the fork point.
-        let mut old_chain = Vec::new();
-        let mut old_hash = self.current_canonical_head.hash;
-
-        while old_hash != current_hash {
-            if let Some(block) = self.blocks_by_hash.get(&old_hash) {
-                old_chain.push(block.clone());
-                old_hash = block.block.parent_hash;
-            } else {
-                // This shouldn't happen as we're walking back the canonical chain
-                warn!(target: "consensus::engine", invalid_hash=?old_hash, "Canonical block not found in TreeState");
-                return None;
-            }
-
-            if old_hash == current_hash {
-                // We've found the fork point
-                break;
-            }
-
-            if let Some(block) = self.blocks_by_hash.get(&current_hash) {
-                if self.is_fork(block.block.hash()) {
-                    new_chain.push(block.clone());
-                    current_hash = block.block.parent_hash;
-                }
-            } else {
-                // This shouldn't happen as we've already walked this path
-                warn!(target: "consensus::engine", invalid_hash=?current_hash, "New chain block not found in TreeState");
-                return None;
-            }
-        }
-        new_chain.reverse();
-        old_chain.reverse();
-
-        Some(NewCanonicalChain::Reorg { new: new_chain, old: old_chain })
-    }
 }
 
 /// Tracks the state of the engine api internals.
@@ -371,14 +445,18 @@ impl TreeEvent {
 #[derive(Debug)]
 pub enum TreeAction {
     /// Make target canonical.
-    MakeCanonical(B256),
+    MakeCanonical {
+        /// The sync target head hash
+        sync_target_head: B256,
+        /// The sync target finalized hash
+        sync_target_finalized: Option<B256>,
+    },
 }
 
 /// The engine API tree handler implementation.
 ///
 /// This type is responsible for processing engine API requests, maintaining the canonical state,
 /// and emitting events.
-#[derive(Debug)]
 pub struct EngineApiTreeHandler<P, E, T: EngineTypes> {
     provider: P,
     executor_provider: E,
@@ -415,11 +493,35 @@ pub struct EngineApiTreeHandler<P, E, T: EngineTypes> {
     config: TreeConfig,
     /// Metrics for the engine api.
     metrics: EngineApiMetrics,
+    /// An invalid block hook.
+    invalid_block_hook: Box<dyn InvalidBlockHook>,
+}
+
+impl<P: Debug, E: Debug, T: EngineTypes + Debug> std::fmt::Debug for EngineApiTreeHandler<P, E, T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("EngineApiTreeHandler")
+            .field("provider", &self.provider)
+            .field("executor_provider", &self.executor_provider)
+            .field("consensus", &self.consensus)
+            .field("payload_validator", &self.payload_validator)
+            .field("state", &self.state)
+            .field("incoming_tx", &self.incoming_tx)
+            .field("persistence", &self.persistence)
+            .field("persistence_state", &self.persistence_state)
+            .field("backfill_sync_state", &self.backfill_sync_state)
+            .field("canonical_in_memory_state", &self.canonical_in_memory_state)
+            .field("payload_builder", &self.payload_builder)
+            .field("config", &self.config)
+            .field("metrics", &self.metrics)
+            .field("invalid_block_hook", &format!("{:p}", self.invalid_block_hook))
+            .finish()
+    }
 }
 
 impl<P, E, T> EngineApiTreeHandler<P, E, T>
 where
-    P: BlockReader + StateProviderFactory + Clone + 'static,
+    P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone
+        + 'static,
+    <P as DatabaseProviderFactory>::Provider: BlockReader,
     E: BlockExecutorProvider,
     T: EngineTypes,
 {
@@ -455,9 +557,15 @@ where
             config,
             metrics: Default::default(),
             incoming_tx,
+            invalid_block_hook: Box::new(NoopInvalidBlockHook),
         }
     }
 
+    /// Sets the invalid block hook.
+    fn set_invalid_block_hook(&mut self, invalid_block_hook: Box<dyn InvalidBlockHook>) {
+        self.invalid_block_hook = invalid_block_hook;
+    }
+
     /// Creates a new [`EngineApiTreeHandler`] instance and spawns it in its
     /// own thread.
    ///
@@ -473,6 +581,7 @@ where
         payload_builder: PayloadBuilderHandle<T>,
         canonical_in_memory_state: CanonicalInMemoryState,
         config: TreeConfig,
+        invalid_block_hook: Box<dyn InvalidBlockHook>,
     ) -> (Sender<FromEngine<EngineApiRequest<T>>>, UnboundedReceiver<EngineApiEvent>) {
         let best_block_number = provider.best_block_number().unwrap_or(0);
         let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default();
@@ -481,6 +590,7 @@ where
             last_persisted_block_hash: header.hash(),
             last_persisted_block_number: best_block_number,
             rx: None,
+            remove_above_state: VecDeque::new(),
         };
 
         let (tx, outgoing) = tokio::sync::mpsc::unbounded_channel();
@@ -490,7 +600,7 @@ where
             header.num_hash(),
         );
 
-        let task = Self::new(
+        let mut task = Self::new(
             provider,
             executor_provider,
             consensus,
@@ -503,6 +613,7 @@ where
             payload_builder,
             config,
         );
+        task.set_invalid_block_hook(invalid_block_hook);
         let incoming = task.incoming_tx.clone();
         std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap();
         (incoming, outgoing)
@@ -520,22 +631,23 @@ where
         loop {
             match self.try_recv_engine_message() {
                 Ok(Some(msg)) => {
+                    debug!(target: "engine::tree", %msg, "received new engine message");
                     if let Err(fatal) = self.on_engine_message(msg) {
-                        error!(target: "engine", %fatal, "insert block fatal error");
+                        error!(target: "engine::tree", %fatal, "insert block fatal error");
                         return
                    }
                 }
                 Ok(None) => {
-                    debug!(target: "engine", "received no engine message for some time, while waiting for persistence task to complete");
+                    debug!(target: "engine::tree", "received no engine message for some time, while waiting for persistence task to complete");
                 }
                 Err(_err) => {
-                    error!(target: "engine", "Engine channel disconnected");
+                    error!(target: "engine::tree", "Engine channel disconnected");
                     return
                 }
             }
 
             if let Err(err) = self.advance_persistence() {
-                error!(target: "engine", %err, "Advancing persistence failed");
+                error!(target: "engine::tree", %err, "Advancing persistence failed");
                 return
             }
         }
@@ -555,12 +667,12 @@ where
             return Ok(None)
         }
 
-        trace!(target: "engine", block_count = %blocks.len(), "received downloaded blocks");
+        trace!(target: "engine::tree", block_count = %blocks.len(), "received downloaded blocks");
         let batch = self.config.max_execute_block_batch_size().min(blocks.len());
         for block in blocks.drain(..batch) {
             if let Some(event) = self.on_downloaded_block(block)? {
                 let needs_backfill = event.is_backfill_action();
-                self.on_tree_event(event);
+                self.on_tree_event(event)?;
                 if needs_backfill {
                     // can exit early if backfill is needed
                     return Ok(None)
@@ -588,14 +700,14 @@ where
     ///
     /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and
     /// returns an error if an internal error occurred.
-    #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine")]
+    #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")]
     fn on_new_payload(
         &mut self,
         payload: ExecutionPayload,
         cancun_fields: Option<CancunPayloadFields>,
     ) -> Result<TreeOutcome<PayloadStatus>, InsertBlockFatalError> {
-        trace!(target: "engine", "invoked new payload");
-        self.metrics.new_payload_messages.increment(1);
+        trace!(target: "engine::tree", "invoked new payload");
+        self.metrics.engine.new_payload_messages.increment(1);
 
         // Ensures that the given payload does not violate any consensus rules that concern the
         // block's layout, like:
@@ -694,14 +806,144 @@ where
         let mut outcome = TreeOutcome::new(status);
         if outcome.outcome.is_valid() && self.is_sync_target_head(block_hash) {
+            // NOTE: if we are in this branch, `is_sync_target_head` has returned true,
+            // meaning a sync target state exists, so we can safely unwrap
+            let sync_target = self
+                .state
+                .forkchoice_state_tracker
+                .sync_target_state()
+                .expect("sync target must exist");
+
+            // if the hash is zero then we should act like there is no finalized hash
+            let sync_target_finalized = (!sync_target.finalized_block_hash.is_zero())
+                .then_some(sync_target.finalized_block_hash);
+
             // if the block is valid and it is the sync target head, make it canonical
-            outcome =
-                outcome.with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash)));
+            outcome = outcome.with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical {
+                sync_target_head: block_hash,
+                sync_target_finalized,
+            }));
         }
 
         Ok(outcome)
     }
 
+    /// Returns the new chain for the given head.
+    ///
+    /// This also handles reorgs.
+    ///
+    /// Note: This does not update the tracked state and instead returns the new chain based on the
+    /// given head.
+    fn on_new_head(
+        &self,
+        new_head: B256,
+        finalized_block: Option<B256>,
+    ) -> ProviderResult<Option<NewCanonicalChain>> {
+        // get the executed new head block
+        let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else {
+            return Ok(None)
+        };
+
+        let new_head_number = new_head_block.block.number;
+        let current_canonical_number = self.state.tree_state.current_canonical_head.number;
+
+        let mut new_chain = vec![new_head_block.clone()];
+        let mut current_hash = new_head_block.block.parent_hash;
+        let mut current_number = new_head_number - 1;
+
+        // Walk back the new chain until we reach a block we know about
+        //
+        // This is only done for in-memory blocks, because we should not have persisted any blocks
+        // that are _above_ the current canonical head.
+        while current_number > current_canonical_number {
+            if let Some(block) = self.executed_block_by_hash(current_hash)? {
+                new_chain.push(block.clone());
+                current_hash = block.block.parent_hash;
+                current_number -= 1;
+            } else {
+                warn!(target: "engine::tree", current_hash=?current_hash, "Sidechain block not found in TreeState");
+                // This should never happen as we're walking back a chain that should connect to
+                // the canonical chain
+                return Ok(None);
+            }
+        }
+
+        // If we have reached the current canonical head by walking back from the target, then we
+        // know this represents an extension of the canonical chain.
+        if current_hash == self.state.tree_state.current_canonical_head.hash {
+            new_chain.reverse();
+
+            // Simple extension of the current chain
+            return Ok(Some(NewCanonicalChain::Commit { new: new_chain }));
+        }
+        // We have a reorg. Walk back both chains to find the fork point.
+        let mut old_chain = Vec::new();
+        let mut old_hash = self.state.tree_state.current_canonical_head.hash;
+
+        while old_hash != current_hash {
+            if let Some(block) = self.executed_block_by_hash(old_hash)? {
+                old_chain.push(block.clone());
+                old_hash = block.block.header.parent_hash;
+            } else {
+                // This shouldn't happen as we're walking back the canonical chain
+                warn!(target: "engine::tree", current_hash=?old_hash, "Canonical block not found in TreeState");
+                return Ok(None);
+            }
+
+            if old_hash == current_hash {
+                // We've found the fork point
+                break;
+            }
+
+            if let Some(block) = self.executed_block_by_hash(current_hash)? {
+                if self.is_fork(block.block.hash(), finalized_block)? {
+                    new_chain.push(block.clone());
+                    current_hash = block.block.parent_hash;
+                }
+            } else {
+                // This shouldn't happen as we've already walked this path
+                warn!(target: "engine::tree", invalid_hash=?current_hash, "New chain block not found in TreeState");
+                return Ok(None);
+            }
+        }
+        new_chain.reverse();
+        old_chain.reverse();
+
+        Ok(Some(NewCanonicalChain::Reorg { new: new_chain, old: old_chain }))
+    }
+
+    /// Determines if the given block is part of a fork by checking that these
+    /// conditions are true:
+    /// * walking back from the target hash to verify that the target hash is not part of an
+    ///   extension of the canonical chain.
+    /// * walking back from the current head to verify that the target hash is not already part of
+    ///   the canonical chain.
+    fn is_fork(&self, target_hash: B256, finalized_hash: Option<B256>) -> ProviderResult<bool> {
+        // verify that the given hash is not part of an extension of the canon chain.
+        let mut current_hash = target_hash;
+        while let Some(current_block) = self.sealed_header_by_hash(current_hash)? {
+            if current_block.hash() == self.state.tree_state.canonical_block_hash() {
+                return Ok(false)
+            }
+            current_hash = current_block.parent_hash;
+        }
+
+        // verify that the given hash is not already part of the canon chain
+        current_hash = self.state.tree_state.canonical_block_hash();
+        while let Some(current_block) = self.sealed_header_by_hash(current_hash)? {
+            if Some(current_hash) == finalized_hash {
+                return Ok(true)
+            }
+
+            if current_block.hash() == target_hash {
+                return Ok(false)
+            }
+            current_hash = current_block.parent_hash;
+        }
+        Ok(true)
+    }
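
// A self-contained sketch (not part of the patch) of the two walks performed
// by `is_fork` above, on a toy chain with string hashes. `parent_of` is a
// hypothetical stand-in for the header lookups; the finalized-hash early exit
// is omitted for brevity.
use std::collections::HashMap;

fn is_fork_sketch(
    parent_of: &HashMap<&'static str, &'static str>,
    canon_head: &'static str,
    target: &'static str,
) -> bool {
    // 1) walking back from the target: does it extend the canonical head?
    let mut cur = target;
    loop {
        if cur == canon_head {
            return false;
        }
        match parent_of.get(cur) {
            Some(&p) => cur = p,
            None => break,
        }
    }
    // 2) walking back from the head: is the target already canonical?
    let mut cur = canon_head;
    loop {
        if cur == target {
            return false;
        }
        match parent_of.get(cur) {
            Some(&p) => cur = p,
            None => break,
        }
    }
    true
}

fn main() {
    // canonical: A <- B <- C (head); sidechain block X with parent B
    let parent_of = HashMap::from([("B", "A"), ("C", "B"), ("X", "B")]);
    assert!(is_fork_sketch(&parent_of, "C", "X")); // forks off B
    assert!(!is_fork_sketch(&parent_of, "C", "B")); // already canonical
}
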
     /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree
     /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid
     /// chain.
     ///
@@ -710,14 +952,14 @@ where
     /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1).
     ///
     /// Returns an error if an internal error occurred like a database error.
-    #[instrument(level = "trace", skip_all, fields(head = %state.head_block_hash, safe = %state.safe_block_hash, finalized = %state.finalized_block_hash), target = "engine")]
+    #[instrument(level = "trace", skip_all, fields(head = %state.head_block_hash, safe = %state.safe_block_hash, finalized = %state.finalized_block_hash), target = "engine::tree")]
     fn on_forkchoice_updated(
         &mut self,
         state: ForkchoiceState,
         attrs: Option<T::PayloadAttributes>,
     ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> {
-        trace!(target: "engine", ?attrs, "invoked forkchoice update");
-        self.metrics.forkchoice_updated_messages.increment(1);
+        trace!(target: "engine::tree", ?attrs, "invoked forkchoice update");
+        self.metrics.engine.forkchoice_updated_messages.increment(1);
         self.canonical_in_memory_state.on_forkchoice_update_received();
 
         if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? {
@@ -747,7 +989,7 @@ where
 
         // 1. ensure we have a new head block
         if self.state.tree_state.canonical_block_hash() == state.head_block_hash {
-            trace!(target: "engine", "fcu head hash is already canonical");
+            trace!(target: "engine::tree", "fcu head hash is already canonical");
 
             // update the safe and finalized blocks and ensure their values are valid
             if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) {
@@ -772,8 +1014,11 @@ where
             return Ok(valid_outcome(state.head_block_hash))
         }
 
+        let finalized_block_opt =
+            (!state.finalized_block_hash.is_zero()).then_some(state.finalized_block_hash);
+
         // 2. ensure we can apply a new chain update for the head block
-        if let Some(chain_update) = self.state.tree_state.on_new_head(state.head_block_hash) {
+        if let Some(chain_update) = self.on_new_head(state.head_block_hash, finalized_block_opt)? {
             let tip = chain_update.tip().header.clone();
             self.on_canonical_chain_update(chain_update);
 
@@ -793,7 +1038,7 @@ where
 
         // 3. check if the head is already part of the canonical chain
         if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) {
-            debug!(target: "engine", head = canonical_header.number, "fcu head block is already canonical");
+            debug!(target: "engine::tree", head = canonical_header.number, "fcu head block is already canonical");
 
             // TODO(mattsse): for optimism we'd need to trigger a build job as well because on
             // Optimism, the proposers are allowed to reorg their own chain at will.
@@ -821,14 +1066,14 @@ where
             !state.safe_block_hash.is_zero() &&
                 self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none()
         {
-            debug!(target: "engine", "missing safe block on initial FCU, downloading safe block");
+            debug!(target: "engine::tree", "missing safe block on initial FCU, downloading safe block");
             state.safe_block_hash
         } else {
            state.head_block_hash
        };
 
         let target = self.lowest_buffered_ancestor_or(target);
-        trace!(target: "engine", %target, "downloading missing block");
+        trace!(target: "engine::tree", %target, "downloading missing block");
 
         Ok(TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::from_status(
             PayloadStatusEnum::Syncing,
@@ -865,15 +1110,25 @@ where
     ///
     /// If we're currently awaiting a response this will try to receive the response (non-blocking)
     /// or send a new persistence action if necessary.
-    fn advance_persistence(&mut self) -> Result<(), TryRecvError> {
-        if self.should_persist() && !self.persistence_state.in_progress() {
-            let blocks_to_persist = self.get_canonical_blocks_to_persist();
-            if blocks_to_persist.is_empty() {
-                debug!(target: "engine", "Returned empty set of blocks to persist");
-            } else {
-                let (tx, rx) = oneshot::channel();
-                let _ = self.persistence.save_blocks(blocks_to_persist, tx);
-                self.persistence_state.start(rx);
+    fn advance_persistence(&mut self) -> Result<(), AdvancePersistenceError> {
+        if !self.persistence_state.in_progress() {
+            if let Some(new_tip_num) = self.persistence_state.remove_above_state.pop_front() {
+                debug!(target: "engine::tree", ?new_tip_num, remove_state=?self.persistence_state.remove_above_state, last_persisted_block_number=?self.persistence_state.last_persisted_block_number, "Removing blocks using persistence task");
+                if new_tip_num < self.persistence_state.last_persisted_block_number {
+                    debug!(target: "engine::tree", ?new_tip_num, "Starting remove blocks job");
+                    let (tx, rx) = oneshot::channel();
+                    let _ = self.persistence.remove_blocks_above(new_tip_num, tx);
+                    self.persistence_state.start(rx);
+                }
+            } else if self.should_persist() {
+                let blocks_to_persist = self.get_canonical_blocks_to_persist();
+                if blocks_to_persist.is_empty() {
+                    debug!(target: "engine::tree", "Returned empty set of blocks to persist");
+                } else {
+                    let (tx, rx) = oneshot::channel();
+                    let _ = self.persistence.save_blocks(blocks_to_persist, tx);
+                    self.persistence_state.start(rx);
+                }
             }
         }
 
@@ -885,24 +1140,25 @@ where
                 .expect("if a persistence task is in progress Receiver must be Some");
             // Check if persistence has completed
             match rx.try_recv() {
-                Ok(last_persisted_block_hash) => {
-                    self.metrics.persistence_duration.record(start_time.elapsed());
-                    let Some(last_persisted_block_hash) = last_persisted_block_hash else {
+                Ok(last_persisted_hash_num) => {
+                    self.metrics.engine.persistence_duration.record(start_time.elapsed());
+                    let Some(BlockNumHash {
+                        hash: last_persisted_block_hash,
+                        number: last_persisted_block_number,
+                    }) = last_persisted_hash_num
+                    else {
                         // if this happened, then we persisted no blocks because we sent an
                         // empty vec of blocks
-                        warn!(target: "engine", "Persistence task completed but did not persist any blocks");
+                        warn!(target: "engine::tree", "Persistence task completed but did not persist any blocks");
                         return Ok(())
                     };
-                    if let Some(block) =
-                        self.state.tree_state.block_by_hash(last_persisted_block_hash)
-                    {
-                        self.persistence_state.finish(last_persisted_block_hash, block.number);
-                        self.on_new_persisted_block();
-                    } else {
-                        error!("could not find persisted block with hash {last_persisted_block_hash} in memory");
-                    }
+
+                    trace!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish");
+                    self.persistence_state
+                        .finish(last_persisted_block_hash, last_persisted_block_number);
+                    self.on_new_persisted_block()?;
                 }
-                Err(TryRecvError::Closed) => return Err(TryRecvError::Closed),
+                Err(TryRecvError::Closed) => return Err(TryRecvError::Closed.into()),
                 Err(TryRecvError::Empty) => self.persistence_state.rx = Some((rx, start_time)),
             }
         }
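
// A small sketch (not part of the patch) of the non-blocking completion poll
// in `advance_persistence`, using std's mpsc in place of tokio's oneshot.
// `Empty` means the task is still running, `Disconnected` (tokio's `Closed`)
// is fatal, and a received value carries what the task persisted.
use std::sync::mpsc::{channel, TryRecvError};

fn main() {
    let (tx, rx) = channel::<u64>();

    // nothing sent yet: the persistence task is still in progress
    assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));

    // the task completes and reports the last persisted block number
    tx.send(42).unwrap();
    match rx.try_recv() {
        Ok(last_persisted) => assert_eq!(last_persisted, 42),
        Err(TryRecvError::Empty) => { /* re-arm and poll again later */ }
        Err(TryRecvError::Disconnected) => panic!("persistence task died"),
    }
}
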
@@ -917,7 +1173,7 @@ where
         match msg {
             FromEngine::Event(event) => match event {
                 FromOrchestrator::BackfillSyncStarted => {
-                    debug!(target: "consensus::engine", "received backfill sync started event");
+                    debug!(target: "engine::tree", "received backfill sync started event");
                     self.backfill_sync_state = BackfillSyncState::Active;
                 }
                 FromOrchestrator::BackfillSyncFinished(ctrl) => {
@@ -947,7 +1203,7 @@ where
                         ));
 
                         // handle the event if any
-                        self.on_maybe_tree_event(res.event.take());
+                        self.on_maybe_tree_event(res.event.take())?;
                     }
 
                     if let Err(err) =
@@ -978,7 +1234,7 @@ where
                 }
                 FromEngine::DownloadedBlocks(blocks) => {
                     if let Some(event) = self.on_downloaded(blocks)? {
-                        self.on_tree_event(event);
+                        self.on_tree_event(event)?;
                     }
                 }
             }
@@ -999,12 +1255,12 @@ where
         &mut self,
         ctrl: ControlFlow,
     ) -> Result<(), InsertBlockFatalError> {
-        debug!(target: "consensus::engine", "received backfill sync finished event");
+        debug!(target: "engine::tree", "received backfill sync finished event");
         self.backfill_sync_state = BackfillSyncState::Idle;
 
         // Pipeline unwound, memorize the invalid block and wait for CL for next sync target.
         if let ControlFlow::Unwind { bad_block, .. } = ctrl {
-            warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind");
+            warn!(target: "engine::tree", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind");
             // update the `invalid_headers` cache with the new invalid header
             self.state.invalid_headers.insert(*bad_block);
             return Ok(())
@@ -1015,8 +1271,20 @@ where
 
         // state housekeeping after backfill sync
         // remove all executed blocks below the backfill height
-        self.state.tree_state.remove_before(backfill_height);
-        self.metrics.executed_blocks.set(self.state.tree_state.block_count() as f64);
+        //
+        // We set the `finalized_num` to `Some(backfill_height)` to ensure we remove all state
+        // before that
+        let backfill_num_hash = self
+            .provider
+            .block_hash(backfill_height)?
+            .map(|hash| BlockNumHash { hash, number: backfill_height });
+
+        self.state.tree_state.remove_until(
+            backfill_height,
+            self.persistence_state.last_persisted_block_hash,
+            backfill_num_hash,
+        );
+        self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64);
 
         // remove all buffered blocks below the backfill height
         self.state.buffer.remove_old_blocks(backfill_height);
@@ -1077,25 +1345,29 @@ where
     /// Attempts to make the given target canonical.
     ///
     /// This will update the tracked canonical in memory state and do the necessary housekeeping.
-    fn make_canonical(&mut self, target: B256) {
-        if let Some(chain_update) = self.state.tree_state.on_new_head(target) {
+    fn make_canonical(&mut self, target: B256, finalized: Option<B256>) -> ProviderResult<()> {
+        if let Some(chain_update) = self.on_new_head(target, finalized)? {
             self.on_canonical_chain_update(chain_update);
         }
+
+        Ok(())
     }
 
     /// Convenience function to handle an optional tree event.
-    fn on_maybe_tree_event(&mut self, event: Option<TreeEvent>) {
+    fn on_maybe_tree_event(&mut self, event: Option<TreeEvent>) -> ProviderResult<()> {
         if let Some(event) = event {
-            self.on_tree_event(event);
+            self.on_tree_event(event)?;
         }
+
+        Ok(())
     }
 
     /// Handles a tree event.
-    fn on_tree_event(&mut self, event: TreeEvent) {
+    fn on_tree_event(&mut self, event: TreeEvent) -> ProviderResult<()> {
         match event {
             TreeEvent::TreeAction(action) => match action {
-                TreeAction::MakeCanonical(target) => {
-                    self.make_canonical(target);
+                TreeAction::MakeCanonical { sync_target_head, sync_target_finalized } => {
+                    self.make_canonical(sync_target_head, sync_target_finalized)?;
                 }
             },
             TreeEvent::BackfillAction(action) => {
@@ -1105,6 +1377,8 @@ where
                 self.emit_event(EngineApiEvent::Download(action));
             }
         }
+
+        Ok(())
     }
 
     /// Emits an outgoing event to the engine.
@@ -1121,13 +1395,13 @@ where
         if self.persistence_state.in_progress() {
             // backfill sync and persisting data are mutually exclusive, so we can't start
             // backfill while we're still persisting
-            debug!(target: "engine", "skipping backfill file while persistence task is active");
+            debug!(target: "engine::tree", "skipping backfill run while persistence task is active");
             return
         }
 
         self.backfill_sync_state = BackfillSyncState::Pending;
-        self.metrics.pipeline_runs.increment(1);
-        debug!(target: "engine", "emitting backfill action event");
+        self.metrics.engine.pipeline_runs.increment(1);
+        debug!(target: "engine::tree", "emitting backfill action event");
 
         let _ = self
@@ -1163,6 +1437,7 @@ where
         let target_number =
             canonical_head_number.saturating_sub(self.config.memory_block_buffer_target());
 
+        debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist");
         while let Some(block) = self.state.tree_state.blocks_by_hash.get(&current_hash) {
             if block.block.number <= last_persisted_number {
                 break;
@@ -1188,10 +1463,53 @@ where
     /// height.
     ///
     /// Assumes that `finish` has been called on the `persistence_state` at least once
-    fn on_new_persisted_block(&mut self) {
-        self.state.tree_state.remove_before(self.persistence_state.last_persisted_block_number);
-        self.canonical_in_memory_state
-            .remove_persisted_blocks(self.persistence_state.last_persisted_block_number);
+    fn on_new_persisted_block(&mut self) -> ProviderResult<()> {
+        let finalized = self.state.forkchoice_state_tracker.last_valid_finalized();
+        self.remove_before(self.persistence_state.last_persisted_block_number, finalized)?;
+        self.canonical_in_memory_state.remove_persisted_blocks(BlockNumHash {
+            number: self.persistence_state.last_persisted_block_number,
+            hash: self.persistence_state.last_persisted_block_hash,
+        });
+        Ok(())
+    }
+
+    /// Return an [`ExecutedBlock`] from database or in-memory state by hash.
+    ///
+    /// NOTE: This cannot fetch [`ExecutedBlock`]s for _finalized_ blocks, instead it can only
+    /// fetch [`ExecutedBlock`]s for _canonical_ blocks, or blocks from sidechains that the node
+    /// has in memory.
+    ///
+    /// For finalized blocks, this will return `None`.
+    fn executed_block_by_hash(&self, hash: B256) -> ProviderResult<Option<ExecutedBlock>> {
+        trace!(target: "engine::tree", ?hash, "Fetching executed block by hash");
+        // check memory first
+        let block = self.state.tree_state.executed_block_by_hash(hash).cloned();
+
+        if block.is_some() {
+            return Ok(block)
+        }
+
+        let Some((_, updates)) = self.state.tree_state.persisted_trie_updates.get(&hash) else {
+            return Ok(None)
+        };
+
+        let SealedBlockWithSenders { block, senders } = self
+            .provider
+            .sealed_block_with_senders(hash.into(), TransactionVariant::WithHash)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?;
+        let execution_output = self
+            .provider
+            .get_state(block.number)?
+            .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?;
+        let hashed_state = execution_output.hash_state_slow();
+
+        Ok(Some(ExecutedBlock {
+            block: Arc::new(block),
+            senders: Arc::new(senders),
+            trie: updates.clone(),
+            execution_output: Arc::new(execution_output),
+            hashed_state: Arc::new(hashed_state),
+        }))
     }
 
     /// Return sealed block from database or in-memory state by hash.
@@ -1242,7 +1560,7 @@ where
     /// Returns an error if we failed to fetch the state from the database.
     fn state_provider(&self, hash: B256) -> ProviderResult<Option<StateProviderBox>> {
         if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) {
-            trace!(target: "engine", %hash, "found canonical state for block in memory");
+            trace!(target: "engine::tree", %hash, "found canonical state for block in memory");
             // the block leads back to the canonical chain
             let historical = self.provider.state_by_block_hash(historical)?;
             return Ok(Some(Box::new(MemoryOverlayStateProvider::new(historical, blocks))))
@@ -1250,13 +1568,13 @@ where
 
         // the hash could belong to an unknown block or a persisted block
         if let Some(header) = self.provider.header(&hash)? {
-            trace!(target: "engine", %hash, number = %header.number, "found canonical state for block in database");
+            trace!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database");
             // the block is known and persisted
             let historical = self.provider.state_by_block_hash(hash)?;
             return Ok(Some(historical))
         }
 
-        trace!(target: "engine", %hash, "no canonical state found for block");
+        trace!(target: "engine::tree", %hash, "no canonical state found for block");
         Ok(None)
     }
 
@@ -1397,7 +1715,7 @@ where
     }
 
     /// Attempts to connect any buffered blocks that are connected to the given parent hash.
-    #[instrument(level = "trace", skip(self), target = "engine")]
+    #[instrument(level = "trace", skip(self), target = "engine::tree")]
     fn try_connect_buffered_blocks(
         &mut self,
         parent: BlockNumHash,
@@ -1415,24 +1733,29 @@ where
             let child_num_hash = child.num_hash();
             match self.insert_block(child) {
                 Ok(res) => {
-                    debug!(target: "engine", child =?child_num_hash, ?res, "connected buffered block");
+                    debug!(target: "engine::tree", child =?child_num_hash, ?res, "connected buffered block");
                     if self.is_sync_target_head(child_num_hash.hash) &&
                         matches!(res, InsertPayloadOk2::Inserted(BlockStatus2::Valid))
                     {
-                        self.make_canonical(child_num_hash.hash);
+                        // we are using the sync target here because we're trying to make the sync
+                        // target canonical
+                        let sync_target_finalized =
+                            self.state.forkchoice_state_tracker.sync_target_finalized();
+
+                        self.make_canonical(child_num_hash.hash, sync_target_finalized)?;
                    }
                 }
                 Err(err) => {
-                    debug!(target: "engine", ?err, "failed to connect buffered block to tree");
+                    debug!(target: "engine::tree", ?err, "failed to connect buffered block to tree");
                     if let Err(fatal) = self.on_insert_block_error(err) {
-                        warn!(target: "engine", %fatal, "fatal error occurred while connecting buffered blocks");
+                        warn!(target: "engine::tree", %fatal, "fatal error occurred while connecting buffered blocks");
                         return Err(fatal)
                     }
                 }
             }
         }
 
-        debug!(target: "engine", elapsed = ?now.elapsed(), %block_count, "connected buffered blocks");
+        debug!(target: "engine::tree", elapsed = ?now.elapsed(), %block_count, "connected buffered blocks");
         Ok(())
     }
 
@@ -1523,7 +1846,7 @@ where
         // if we have already canonicalized the finalized block, we should skip backfill
         match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) {
             Err(err) => {
-                warn!(target: "engine", %err, "Failed to get finalized block header");
+                warn!(target: "engine::tree", %err, "Failed to get finalized block header");
             }
             Ok(None) => {
                 // ensure the finalized block is known (not the zero hash)
@@ -1545,7 +1868,7 @@ where
                 //
                 // However, optimism chains will do this. The risk of a reorg is, however, low.
-                debug!(target: "engine", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target.");
+                debug!(target: "engine::tree", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target.");
                 return Some(state.head_block_hash)
             }
             Ok(Some(_)) => {
@@ -1558,19 +1881,71 @@ where
         None
     }
 
+    /// This determines whether or not we should remove blocks from the chain, based on a canonical
+    /// chain update.
+    ///
+    /// If the chain update is a reorg:
+    /// * is the new chain behind the last persisted block, or
+    /// * if the root of the new chain is at the same height as the last persisted block, is it a
+    ///   different block
+    ///
+    /// If either of these is true, then this returns the height of the first block. Otherwise,
+    /// this returns [`None`]. This should be used to check whether or not we should be sending a
+    /// remove command to the persistence task.
+    fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option<u64> {
+        let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None };
+
+        let BlockNumHash { number: new_num, hash: new_hash } =
+            new.first().map(|block| block.block.num_hash())?;
+
+        match new_num.cmp(&self.persistence_state.last_persisted_block_number) {
+            Ordering::Greater => {
+                // new number is above the last persisted block so the reorg can be performed
+                // entirely in memory
+                None
+            }
+            Ordering::Equal => {
+                // new number is the same, if the hash is the same then we should not need to
+                // remove any blocks
+                (self.persistence_state.last_persisted_block_hash != new_hash).then_some(new_num)
+            }
+            Ordering::Less => {
+                // this means we are below the last persisted block and must remove on disk blocks
+                Some(new_num)
+            }
+        }
+    }
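
// A self-contained sketch (not part of the patch) of the three-way comparison
// in `find_disk_reorg` above, over plain integers instead of
// `NewCanonicalChain`. It returns the height whose on-disk blocks must be
// removed, if any; all names here are hypothetical.
use std::cmp::Ordering;

fn disk_reorg_height_sketch(
    new_chain_first: u64,
    new_chain_first_hash: u64,
    last_persisted_num: u64,
    last_persisted_hash: u64,
) -> Option<u64> {
    match new_chain_first.cmp(&last_persisted_num) {
        // entirely in memory, nothing on disk to unwind
        Ordering::Greater => None,
        // same height: only unwind if it is actually a different block
        Ordering::Equal => {
            (new_chain_first_hash != last_persisted_hash).then_some(new_chain_first)
        }
        // the reorg reaches below what is persisted: unwind from here
        Ordering::Less => Some(new_chain_first),
    }
}

fn main() {
    assert_eq!(disk_reorg_height_sketch(11, 0xa, 10, 0xb), None);
    assert_eq!(disk_reorg_height_sketch(10, 0xa, 10, 0xb), Some(10));
    assert_eq!(disk_reorg_height_sketch(9, 0xa, 10, 0xb), Some(9));
}
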
+    /// Invoked when the canonical chain has been updated.
+    ///
+    /// This is invoked on a valid forkchoice update, or if we can make the target block canonical.
     fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) {
-        trace!(target: "engine", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count(), "applying new chain update");
+        trace!(target: "engine::tree", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count(), "applying new chain update");
         let start = Instant::now();
 
+        // schedule a remove_above call if we have an on-disk reorg
+        if let Some(height) = self.find_disk_reorg(&chain_update) {
+            // calculate the new tip by subtracting one from the lowest part of the chain
+            let new_tip_num = height.saturating_sub(1);
+            self.persistence_state.schedule_removal(new_tip_num);
+        }
+
         // update the tracked canonical head
         self.state.tree_state.set_canonical_head(chain_update.tip().num_hash());
 
         let tip = chain_update.tip().header.clone();
         let notification = chain_update.to_chain_notification();
 
+        // reinsert any missing reorged blocks
+        if let NewCanonicalChain::Reorg { new, old } = &chain_update {
+            let new_first = new.first().map(|first| first.block.num_hash());
+            let old_first = old.first().map(|first| first.block.num_hash());
+            trace!(target: "engine::tree", ?new_first, ?old_first, "Reorg detected, new and old first blocks");
+
+            self.reinsert_reorged_blocks(new.clone());
+            self.reinsert_reorged_blocks(old.clone());
+        }
+
         // update the tracked in-memory state with the new chain
         self.canonical_in_memory_state.update_chain(chain_update);
         self.canonical_in_memory_state.set_canonical_head(tip.clone());
@@ -1585,6 +1960,16 @@ where
         ));
     }
 
+    /// This reinserts any blocks in the new chain that do not already exist in the tree
+    fn reinsert_reorged_blocks(&mut self, new_chain: Vec<ExecutedBlock>) {
+        for block in new_chain {
+            if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() {
+                trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state");
+                self.state.tree_state.insert_executed(block);
+            }
+        }
+    }
+
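// A toy sketch (not part of the patch) of the idempotent guard in
// `reinsert_reorged_blocks` above. The map stands in for
// `TreeState::blocks_by_hash`; blocks already present are left untouched, so
// reinserting both the old and the new chain is safe to repeat.
use std::collections::HashMap;

fn reinsert_sketch(tree: &mut HashMap<u64, &'static str>, chain: &[(u64, &'static str)]) {
    for &(hash, block) in chain {
        // mirrors the `executed_block_by_hash(..).is_none()` check above
        tree.entry(hash).or_insert(block);
    }
}

fn main() {
    let mut tree = HashMap::from([(1, "existing")]);
    reinsert_sketch(&mut tree, &[(1, "reorged"), (2, "reorged")]);
    assert_eq!(tree[&1], "existing"); // kept
    assert_eq!(tree[&2], "reorged"); // reinserted
}
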
     /// This handles downloaded blocks that are shown to be disconnected from the canonical chain.
     ///
     /// This mainly compares the missing parent of the downloaded block with the current canonical
@@ -1599,7 +1984,7 @@ where
         if let Some(target) =
             self.backfill_sync_target(head.number, missing_parent.number, Some(downloaded_block))
         {
-            trace!(target: "engine", %target, "triggering backfill on downloaded block");
+            trace!(target: "engine::tree", %target, "triggering backfill on downloaded block");
             return Some(TreeEvent::BackfillAction(BackfillAction::Start(target.into())));
         }
 
@@ -1615,10 +2000,10 @@ where
         let request = if let Some(distance) =
             self.distance_from_local_tip(head.number, missing_parent.number)
         {
-            trace!(target: "engine", %distance, missing=?missing_parent, "downloading missing parent block range");
+            trace!(target: "engine::tree", %distance, missing=?missing_parent, "downloading missing parent block range");
             DownloadRequest::BlockRange(missing_parent.hash, distance)
         } else {
-            trace!(target: "engine", missing=?missing_parent, "downloading missing parent block");
+            trace!(target: "engine::tree", missing=?missing_parent, "downloading missing parent block");
             // This happens when the missing parent is on an outdated
             // sidechain and we can only download the missing block itself
             DownloadRequest::single_block(missing_parent.hash)
@@ -1632,7 +2017,7 @@ where
     /// Returns an event with the appropriate action to take, such as:
     ///  - download more missing blocks
     ///  - try to canonicalize the target if the `block` is the tracked target (head) block.
-    #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number,), target = "engine")]
+    #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number,), target = "engine::tree")]
     fn on_downloaded_block(
         &mut self,
         block: SealedBlockWithSenders,
@@ -1654,14 +2039,18 @@ where
         match self.insert_block(block) {
             Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) => {
                 if self.is_sync_target_head(block_num_hash.hash) {
-                    trace!(target: "engine", "appended downloaded sync target block");
+                    trace!(target: "engine::tree", "appended downloaded sync target block");
+
+                    let sync_target_finalized =
+                        self.state.forkchoice_state_tracker.sync_target_finalized();
+
                     // we just inserted the current sync target block, we can try to make it
                     // canonical
-                    return Ok(Some(TreeEvent::TreeAction(TreeAction::MakeCanonical(
-                        block_num_hash.hash,
-                    ))))
+                    return Ok(Some(TreeEvent::TreeAction(TreeAction::MakeCanonical {
+                        sync_target_head: block_num_hash.hash,
+                        sync_target_finalized,
+                    })))
                }
-                trace!(target: "engine", "appended downloaded block");
+                trace!(target: "engine::tree", "appended downloaded block");
                 self.try_connect_buffered_blocks(block_num_hash)?;
             }
             Ok(InsertPayloadOk2::Inserted(BlockStatus2::Disconnected {
@@ -1677,12 +2066,12 @@ where
                 ))
             }
             Ok(InsertPayloadOk2::AlreadySeen(_)) => {
-                trace!(target: "engine", "downloaded block already executed");
+                trace!(target: "engine::tree", "downloaded block already executed");
             }
             Err(err) => {
-                debug!(target: "engine", err=%err.kind(), "failed to insert downloaded block");
+                debug!(target: "engine::tree", err=%err.kind(), "failed to insert downloaded block");
                 if let Err(fatal) = self.on_insert_block_error(err) {
-                    warn!(target: "engine", %fatal, "fatal error occurred while inserting downloaded block");
+                    warn!(target: "engine::tree", %fatal, "fatal error occurred while inserting downloaded block");
                     return Err(fatal)
                 }
             }
@@ -1767,25 +2156,66 @@ where
             .collect::<Vec<_>>();
 
         let exec_time = Instant::now();
-        let output = executor.execute((&block, U256::MAX, Some(&ancestor_blocks)).into())?;
-        debug!(target: "engine", elapsed=?exec_time.elapsed(), ?block_number, "Executed block");
-        self.consensus.validate_block_post_execution(
+        let output = self
+            .metrics
+            .executor
+            .metered((&block, U256::MAX, Some(&ancestor_blocks)).into(), |input| {
+                executor.execute(input)
+            })?;
+        debug!(target: "engine::tree", elapsed=?exec_time.elapsed(), ?block_number, "Executed block");
+
+        if let Err(err) = self.consensus.validate_block_post_execution(
             &block,
             PostExecutionInput::new(&output.receipts, &output.requests),
-        )?;
+        ) {
+            // call post-block hook
+            self.invalid_block_hook.on_invalid_block(
+                &parent_block,
+                &block.seal_slow(),
+                &output,
+                None,
+            );
+            return Err(err.into())
+        }
 
         let hashed_state = HashedPostState::from_bundle_state(&output.state.state);
 
         let root_time = Instant::now();
-        let (state_root, trie_output) =
-            state_provider.state_root_with_updates(hashed_state.clone())?;
+        let mut state_root_result = None;
+
+        // We attempt to compute state root in parallel if we are currently not persisting anything
+        // to database. This is safe, because the database state cannot change until we
+        // finish parallel computation. It is important that nothing is being persisted as
+        // we are computing in parallel, because we initialize a different database transaction
+        // per thread and it might end up with a different view of the database.
+        let persistence_in_progress = self.persistence_state.in_progress();
+        if !persistence_in_progress {
+            state_root_result = match self
+                .compute_state_root_in_parallel(block.parent_hash, &hashed_state)
+            {
+                Ok((state_root, trie_output)) => Some((state_root, trie_output)),
+                Err(ProviderError::ConsistentView(error)) => {
+                    debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back");
+                    None
+                }
+                Err(error) => return Err(error.into()),
+            };
+        }
+
+        let (state_root, trie_output) = if let Some(result) = state_root_result {
+            result
+        } else {
+            debug!(target: "engine::tree", persistence_in_progress, "Failed to compute state root in parallel");
+            state_provider.state_root_with_updates(hashed_state.clone())?
+        };
 
         if state_root != block.state_root {
-            debug!(
-                target: "engine",
-                number = block.number,
-                hash = %block_hash,
-                receipts = ?output.receipts,
-                "Mismatched state root"
+            // call post-block hook
+            self.invalid_block_hook.on_invalid_block(
+                &parent_block,
+                &block.clone().seal_slow(),
+                &output,
+                Some((&trie_output, state_root)),
             );
             return Err(ConsensusError::BodyStateRootDiff(
                 GotExpected { got: state_root, expected: block.state_root }.into(),
@@ -1793,7 +2223,9 @@ where
             .into())
         }
 
-        debug!(target: "engine", elapsed=?root_time.elapsed(), ?block_number, "Calculated state root");
+        let root_elapsed = root_time.elapsed();
+        self.metrics.block_validation.record_state_root(root_elapsed.as_secs_f64());
+        debug!(target: "engine::tree", ?root_elapsed, ?block_number, "Calculated state root");
 
         let executed = ExecutedBlock {
             block: sealed_block.clone(),
@@ -1804,25 +2236,63 @@ where
         };
 
         if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash {
-            debug!(target: "engine", pending = ?executed.block().num_hash(), "updating pending block");
+            debug!(target: "engine::tree", pending = ?executed.block().num_hash(), "updating pending block");
             // if the parent is the canonical head, we can insert the block as the pending block
             self.canonical_in_memory_state.set_pending_block(executed.clone());
         }
 
         self.state.tree_state.insert_executed(executed);
-        self.metrics.executed_blocks.set(self.state.tree_state.block_count() as f64);
+        self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64);
+
+        // we are checking that this is a fork block compared to the current `SYNCING` forkchoice
+        // state.
+        let finalized = self.state.forkchoice_state_tracker.sync_target_finalized();
 
         // emit insert event
-        let engine_event = if self.state.tree_state.is_fork(block_hash) {
-            BeaconConsensusEngineEvent::ForkBlockAdded(sealed_block)
+        let elapsed = start.elapsed();
+        let engine_event = if self.is_fork(block_hash, finalized)? {
+            BeaconConsensusEngineEvent::ForkBlockAdded(sealed_block, elapsed)
         } else {
-            BeaconConsensusEngineEvent::CanonicalBlockAdded(sealed_block, start.elapsed())
+            BeaconConsensusEngineEvent::CanonicalBlockAdded(sealed_block, elapsed)
         };
         self.emit_event(EngineApiEvent::BeaconConsensus(engine_event));
 
         Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid))
     }
 
+    /// Compute state root for the given hashed post state in parallel.
+    ///
+    /// # Returns
+    ///
+    /// Returns `Ok(_)` if computed successfully.
+    /// Returns `Err(_)` if an error was encountered during computation.
+    /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation
+    /// should be used instead.
+    fn compute_state_root_in_parallel(
+        &self,
+        parent_hash: B256,
+        hashed_state: &HashedPostState,
+    ) -> ProviderResult<(B256, TrieUpdates)> {
+        let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?;
+        let mut input = TrieInput::default();
+
+        if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(parent_hash) {
+            // Retrieve revert state for historical block.
+            let revert_state = consistent_view.revert_state(historical)?;
+            input.append(revert_state);
+
+            // Extend with contents of parent in-memory blocks.
+            for block in blocks.iter().rev() {
+                input.append_cached_ref(block.trie_updates(), block.hashed_state())
+            }
+        }
+
+        // Extend with block we are validating root for.
+        input.append_ref(hashed_state);
+
+        Ok(ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates()?)
+    }
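
// A generic sketch (not part of the patch) of the "parallel first, serial
// fallback" control flow used above, with stand-in error and root types.
// `Consistency` plays the role of `ProviderError::ConsistentView(_)`; any
// other error is propagated as fatal. All names here are hypothetical.
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum RootError {
    Consistency,
    Fatal(&'static str),
}

fn parallel_root() -> Result<u64, RootError> {
    // pretend the database view changed while persistence was running
    Err(RootError::Consistency)
}

fn serial_root() -> Result<u64, RootError> {
    Ok(0xdead)
}

fn state_root_with_fallback() -> Result<u64, RootError> {
    match parallel_root() {
        Ok(root) => Ok(root),
        // an expected consistency failure: fall back to the serial path
        Err(RootError::Consistency) => serial_root(),
        Err(fatal) => Err(fatal),
    }
}

fn main() {
    assert_eq!(state_root_with_fallback(), Ok(0xdead));
}
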
+
     /// Handles an error that occurred while inserting a block.
     ///
     /// If this is a validation error this will mark the block as invalid.
@@ -1879,7 +2349,7 @@ where
 
         match self.find_canonical_header(finalized_block_hash) {
             Ok(None) => {
-                debug!(target: "engine", "Finalized block not found in canonical chain");
+                debug!(target: "engine::tree", "Finalized block not found in canonical chain");
                 // if the finalized block is not known, we can't update the finalized block
                 return Err(OnForkChoiceUpdated::invalid_state())
             }
             Ok(Some(finalized)) => {
                 self.canonical_in_memory_state.set_finalized(finalized);
             }
             Err(err) => {
-                error!(target: "engine", %err, "Failed to fetch finalized block header");
+                error!(target: "engine::tree", %err, "Failed to fetch finalized block header");
             }
         }
 
@@ -1902,7 +2372,7 @@ where
 
         match self.find_canonical_header(safe_block_hash) {
             Ok(None) => {
-                debug!(target: "engine", "Safe block not found in canonical chain");
+                debug!(target: "engine::tree", "Safe block not found in canonical chain");
                 // if the safe block is not known, we can't update the safe block
                 return Err(OnForkChoiceUpdated::invalid_state())
             }
             Ok(Some(finalized)) => {
                 self.canonical_in_memory_state.set_safe(finalized);
             }
             Err(err) => {
-                error!(target: "engine", %err, "Failed to fetch safe block header");
+                error!(target: "engine::tree", %err, "Failed to fetch safe block header");
             }
         }
 
@@ -1966,7 +2436,7 @@ where
         if !self.backfill_sync_state.is_idle() {
             // We can only process new forkchoice updates if the pipeline is idle, since it requires
             // exclusive access to the database
-            trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update");
+            trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update");
             return Ok(Some(OnForkChoiceUpdated::syncing()))
         }
 
@@ -2024,6 +2494,45 @@ where
             Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(),
         }
     }
+
+    /// Remove all blocks up to __and including__ the given block number.
+    ///
+    /// If a finalized hash is provided, the only non-canonical blocks which will be removed are
+    /// those which have a fork point at or below the finalized hash.
+    ///
+    /// Canonical blocks below the upper bound will still be removed.
+    pub(crate) fn remove_before(
+        &mut self,
+        upper_bound: BlockNumber,
+        finalized_hash: Option<B256>,
+    ) -> ProviderResult<()> {
+        // first fetch the finalized block number, then call `remove_until` on the tree state
+        let num = if let Some(hash) = finalized_hash {
+            self.provider.block_number(hash)?.map(|number| BlockNumHash { number, hash })
+        } else {
+            None
+        };
+
+        self.state.tree_state.remove_until(
+            upper_bound,
+            self.persistence_state.last_persisted_block_hash,
+            num,
+        );
+        Ok(())
+    }
+}
+
+/// An error that can occur while advancing persistence: either a [`TryRecvError`] or a
+/// [`ProviderError`].
+#[derive(Debug, thiserror::Error)]
+pub enum AdvancePersistenceError {
+    /// An error from failing to receive a value from the persistence task
+    #[error(transparent)]
+    RecvError(#[from] TryRecvError),
+    /// A provider error
+    #[error(transparent)]
+    Provider(#[from] ProviderError),
 }
 
 /// The state of the persistence task.
@@ -2035,11 +2544,14 @@ pub struct PersistenceState {
     last_persisted_block_hash: B256,
     /// Receiver end of channel where the result of the persistence task will be
     /// sent when done. A None value means there's no persistence task in progress.
-    rx: Option<(oneshot::Receiver<Option<B256>>, Instant)>,
+    rx: Option<(oneshot::Receiver<Option<BlockNumHash>>, Instant)>,
     /// The last persisted block number.
     ///
     /// This tracks the chain height that is persisted on disk
     last_persisted_block_number: u64,
+    /// The block above which blocks should be removed from disk because there has been an
+    /// on-disk reorg.
+    remove_above_state: VecDeque<u64>,
 }
 
 impl PersistenceState {
@@ -2050,13 +2562,20 @@ impl PersistenceState {
     }
 
     /// Sets state for a started persistence task.
-    fn start(&mut self, rx: oneshot::Receiver<Option<B256>>) {
+    fn start(&mut self, rx: oneshot::Receiver<Option<BlockNumHash>>) {
         self.rx = Some((rx, Instant::now()));
     }
 
+    /// Queues the given tip number in `remove_above_state`; the removal itself is only executed
+    /// if the number is less than the current `last_persisted_block_number`.
+    fn schedule_removal(&mut self, new_tip_num: u64) {
+        debug!(target: "engine::tree", ?new_tip_num, prev_remove_state=?self.remove_above_state, last_persisted_block_number=?self.last_persisted_block_number, "Scheduling removal");
+        self.remove_above_state.push_back(new_tip_num);
+    }
+
     /// Sets state for a finished persistence task.
     fn finish(&mut self, last_persisted_block_hash: B256, last_persisted_block_number: u64) {
-        trace!(target: "engine", block=%last_persisted_block_number, hash=%last_persisted_block_hash, "updating persistence state");
+        trace!(target: "engine::tree", block=%last_persisted_block_number, hash=%last_persisted_block_hash, "updating persistence state");
         self.rx = None;
         self.last_persisted_block_number = last_persisted_block_number;
         self.last_persisted_block_hash = last_persisted_block_hash;
@@ -2083,6 +2602,61 @@ mod tests {
     };
     use tokio::sync::mpsc::unbounded_channel;
 
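// A hypothetical test-style sketch (not part of the patch) of the FIFO
// semantics behind `PersistenceState::schedule_removal`, modeled with a bare
// `VecDeque` since scheduled removal heights are drained oldest-first by
// `advance_persistence`.
#[test]
fn removal_heights_drain_in_fifo_order_sketch() {
    use std::collections::VecDeque;

    let mut remove_above_state: VecDeque<u64> = VecDeque::new();
    // two reorgs scheduled while a persistence task was in flight
    remove_above_state.push_back(90);
    remove_above_state.push_back(85);

    // the oldest request is popped and executed first
    assert_eq!(remove_above_state.pop_front(), Some(90));
    assert_eq!(remove_above_state.pop_front(), Some(85));
    assert!(remove_above_state.is_empty());
}
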
+    /// This is a test channel that allows you to `release` any value that is in the channel.
+    ///
+    /// If nothing has been sent, then the next value will be immediately sent.
+    #[allow(dead_code)]
+    struct TestChannel<T> {
+        /// If an item is sent to this channel, an item will be released in the wrapped channel
+        release: Receiver<()>,
+        /// The sender channel
+        tx: Sender<T>,
+        /// The receiver channel
+        rx: Receiver<T>,
+    }
+
+    impl<T: Send + 'static> TestChannel<T> {
+        /// Creates a new test channel
+        #[allow(dead_code)]
+        fn spawn_channel() -> (Sender<T>, Receiver<T>, TestChannelHandle) {
+            let (original_tx, original_rx) = channel();
+            let (wrapped_tx, wrapped_rx) = channel();
+            let (release_tx, release_rx) = channel();
+            let handle = TestChannelHandle::new(release_tx);
+            let test_channel = Self { release: release_rx, tx: wrapped_tx, rx: original_rx };
+            // spawn the task that listens and releases stuff
+            std::thread::spawn(move || test_channel.intercept_loop());
+            (original_tx, wrapped_rx, handle)
+        }
+
+        /// Runs the intercept loop, waiting for the handle to release a value
+        fn intercept_loop(&self) {
+            while self.release.recv() == Ok(()) {
+                let Ok(value) = self.rx.recv() else { return };
+
+                let _ = self.tx.send(value);
+            }
+        }
+    }
+
+    struct TestChannelHandle {
+        /// The sender to use for releasing values
+        release: Sender<()>,
+    }
+
+    impl TestChannelHandle {
+        /// Returns a [`TestChannelHandle`]
+        const fn new(release: Sender<()>) -> Self {
+            Self { release }
+        }
+
+        /// Signals to the channel task that a value should be released
+        #[allow(dead_code)]
+        fn release(&self) {
+            let _ = self.release.send(());
+        }
+    }
+
     struct TestHarness {
         tree: EngineApiTreeHandler<MockEthProvider, MockExecutorProvider, EthEngineTypes>,
         to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes>>>,
@@ -2097,6 +2671,20 @@ mod tests {
     impl TestHarness {
         fn new(chain_spec: Arc<ChainSpec>) -> Self {
             let (action_tx, action_rx) = channel();
+            Self::with_persistence_channel(chain_spec, action_tx, action_rx)
+        }
+
+        #[allow(dead_code)]
+        fn with_test_channel(chain_spec: Arc<ChainSpec>) -> (Self, TestChannelHandle) {
+            let (action_tx, action_rx, handle) = TestChannel::spawn_channel();
+            (Self::with_persistence_channel(chain_spec, action_tx, action_rx), handle)
+        }
+
+        fn with_persistence_channel(
+            chain_spec: Arc<ChainSpec>,
+            action_tx: Sender<PersistenceAction>,
+            action_rx: Receiver<PersistenceAction>,
+        ) -> Self {
             let persistence_handle = PersistenceHandle::new(action_tx);
 
             let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
@@ -2108,7 +2696,7 @@ mod tests {
 
             let (from_tree_tx, from_tree_rx) = unbounded_channel();
 
-            let header = chain_spec.genesis_header().seal_slow();
+            let header = chain_spec.genesis_header().clone().seal_slow();
             let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash());
             let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None);
 
@@ -2167,6 +2755,7 @@ mod tests {
                 blocks_by_number,
                 current_canonical_head: blocks.last().unwrap().block().num_hash(),
                 parent_to_child,
+                persisted_trie_updates: HashMap::default(),
             };
 
             let last_executed_block = blocks.last().unwrap().clone();
@@ -2336,6 +2925,7 @@ mod tests {
             match event {
                 EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::ForkBlockAdded(
                     block,
+                    _,
                 )) => {
                     assert!(block.hash() == expected_hash);
                 }
@@ -2658,15 +3248,113 @@ mod tests {
 
     #[tokio::test]
     async fn test_tree_state_remove_before() {
-        let mut tree_state = TreeState::new(BlockNumHash::default());
+        let start_num_hash = BlockNumHash::default();
+        let mut tree_state = TreeState::new(start_num_hash);
         let blocks: Vec<_> = TestBlockBuilder::default().get_executed_blocks(1..6).collect();
 
         for block in &blocks {
             tree_state.insert_executed(block.clone());
         }
 
+        let last = blocks.last().unwrap();
+
+        // set the canonical head
+        tree_state.set_canonical_head(last.block.num_hash());
+
bound, so we should remove anything up to and including 2 - tree_state.remove_before(2); + tree_state.remove_until(2, start_num_hash.hash, Some(blocks[1].block.num_hash())); + + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_number.contains_key(&1)); + assert!(!tree_state.blocks_by_number.contains_key(&2)); + + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_number.contains_key(&3)); + assert!(tree_state.blocks_by_number.contains_key(&4)); + assert!(tree_state.blocks_by_number.contains_key(&5)); + + assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + + assert_eq!( + tree_state.parent_to_child.get(&blocks[2].block.hash()), + Some(&HashSet::from([blocks[3].block.hash()])) + ); + assert_eq!( + tree_state.parent_to_child.get(&blocks[3].block.hash()), + Some(&HashSet::from([blocks[4].block.hash()])) + ); + } + + #[tokio::test] + async fn test_tree_state_remove_before_finalized() { + let start_num_hash = BlockNumHash::default(); + let mut tree_state = TreeState::new(start_num_hash); + let blocks: Vec<_> = TestBlockBuilder::default().get_executed_blocks(1..6).collect(); + + for block in &blocks { + tree_state.insert_executed(block.clone()); + } + + let last = blocks.last().unwrap(); + + // set the canonical head + tree_state.set_canonical_head(last.block.num_hash()); + + // we should still remove everything up to and including 2 + tree_state.remove_until(2, start_num_hash.hash, None); + + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_number.contains_key(&1)); + assert!(!tree_state.blocks_by_number.contains_key(&2)); + + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_number.contains_key(&3)); + assert!(tree_state.blocks_by_number.contains_key(&4)); + assert!(tree_state.blocks_by_number.contains_key(&5)); + + assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + + assert_eq!( + tree_state.parent_to_child.get(&blocks[2].block.hash()), + Some(&HashSet::from([blocks[3].block.hash()])) + ); + assert_eq!( + tree_state.parent_to_child.get(&blocks[3].block.hash()), + Some(&HashSet::from([blocks[4].block.hash()])) + ); + } + + #[tokio::test] + async fn test_tree_state_remove_before_lower_finalized() { + let start_num_hash = BlockNumHash::default(); + let mut tree_state 
= TreeState::new(start_num_hash); + let blocks: Vec<_> = TestBlockBuilder::default().get_executed_blocks(1..6).collect(); + + for block in &blocks { + tree_state.insert_executed(block.clone()); + } + + let last = blocks.last().unwrap(); + + // set the canonical head + tree_state.set_canonical_head(last.block.num_hash()); + + // we have no forks so we should still remove anything up to and including 2 + tree_state.remove_until(2, start_num_hash.hash, Some(blocks[0].block.num_hash())); assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); @@ -2698,17 +3386,18 @@ mod tests { #[tokio::test] async fn test_tree_state_on_new_head() { - let mut tree_state = TreeState::new(BlockNumHash::default()); + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); let mut test_block_builder = TestBlockBuilder::default(); let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..6).collect(); for block in &blocks { - tree_state.insert_executed(block.clone()); + test_harness.tree.state.tree_state.insert_executed(block.clone()); } // set block 3 as the current canonical head - tree_state.set_canonical_head(blocks[2].block.num_hash()); + test_harness.tree.state.tree_state.set_canonical_head(blocks[2].block.num_hash()); // create a fork from block 2 let fork_block_3 = @@ -2718,12 +3407,12 @@ mod tests { let fork_block_5 = test_block_builder.get_executed_block_with_number(5, fork_block_4.block.hash()); - tree_state.insert_executed(fork_block_3.clone()); - tree_state.insert_executed(fork_block_4.clone()); - tree_state.insert_executed(fork_block_5.clone()); + test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone()); + test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone()); + test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone()); // normal (non-reorg) case - let result = tree_state.on_new_head(blocks[4].block.hash()); + let result = test_harness.tree.on_new_head(blocks[4].block.hash(), None).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Commit { .. }))); if let Some(NewCanonicalChain::Commit { new }) = result { assert_eq!(new.len(), 2); @@ -2732,7 +3421,7 @@ mod tests { } // reorg case - let result = tree_state.on_new_head(fork_block_5.block.hash()); + let result = test_harness.tree.on_new_head(fork_block_5.block.hash(), None).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. 
}))); if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), 3); @@ -2749,26 +3438,27 @@ mod tests { async fn test_tree_state_on_new_head_deep_fork() { reth_tracing::init_test_tracing(); - let mut tree_state = TreeState::new(BlockNumHash::default()); + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); let mut test_block_builder = TestBlockBuilder::default(); let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect(); for block in &blocks { - tree_state.insert_executed(block.clone()); + test_harness.tree.state.tree_state.insert_executed(block.clone()); } // set last block as the current canonical head let last_block = blocks.last().unwrap().block.clone(); - tree_state.set_canonical_head(last_block.num_hash()); + test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash()); // create a fork chain from last_block let chain_a = test_block_builder.create_fork(&last_block, 10); let chain_b = test_block_builder.create_fork(&last_block, 10); for block in &chain_a { - tree_state.insert_executed(ExecutedBlock { + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { block: Arc::new(block.block.clone()), senders: Arc::new(block.senders.clone()), execution_output: Arc::new(ExecutionOutcome::default()), @@ -2776,10 +3466,10 @@ mod tests { trie: Arc::new(TrieUpdates::default()), }); } - tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); + test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); for block in &chain_b { - tree_state.insert_executed(ExecutedBlock { + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { block: Arc::new(block.block.clone()), senders: Arc::new(block.senders.clone()), execution_output: Arc::new(ExecutionOutcome::default()), @@ -2789,7 +3479,8 @@ mod tests { } // reorg case - let result = tree_state.on_new_head(chain_b.first().unwrap().block.hash()); + let result = + test_harness.tree.on_new_head(chain_b.first().unwrap().block.hash(), None).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. }))); if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), 1); @@ -3105,7 +3796,7 @@ mod tests { .on_engine_message(FromEngine::DownloadedBlocks(remaining.clone())) .unwrap(); - test_harness.check_fork_chain_insertion(remaining).await; + test_harness.check_canon_chain_insertion(remaining).await; // check canonical chain committed event with the hash of the latest block test_harness.check_canon_commit(main_chain_last_hash).await; @@ -3213,7 +3904,7 @@ mod tests { test_harness.check_canon_head(chain_b_tip_hash); // verify that chain A is now considered a fork - assert!(test_harness.tree.state.tree_state.is_fork(chain_a.last().unwrap().hash())); + assert!(test_harness.tree.is_fork(chain_a.last().unwrap().hash(), None).unwrap()); } #[tokio::test] diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 48ce54f8d1..a2c07eb43f 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -105,7 +105,7 @@ where S: Stream>, Engine: EngineTypes, Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm, + Evm: ConfigureEvm

<Header = Header>, { type Item = S::Item; @@ -237,7 +237,7 @@ fn create_reorg_head( ) -> RethResult<(ExecutionPayload, Option<CancunPayloadFields>)> where Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm, + Evm: ConfigureEvm<Header = Header>
, { let chain_spec = payload_validator.chain_spec(); @@ -279,13 +279,7 @@ where // Configure environments let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - chain_spec, - &reorg_target.header, - U256::MAX, - ); + evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, &reorg_target.header, U256::MAX); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = evm_config.evm_with_env(&mut state, env); diff --git a/crates/ethereum-forks/src/display.rs b/crates/ethereum-forks/src/display.rs index 80c4ae8cd9..cc18a1e174 100644 --- a/crates/ethereum-forks/src/display.rs +++ b/crates/ethereum-forks/src/display.rs @@ -1,12 +1,10 @@ -#[cfg(not(feature = "std"))] +use crate::{hardforks::Hardforks, ForkCondition}; use alloc::{ format, string::{String, ToString}, vec::Vec, }; -use crate::{hardforks::Hardforks, ForkCondition}; - /// A container to pretty-print a hardfork. /// /// The fork is formatted depending on its fork condition: diff --git a/crates/ethereum-forks/src/forkcondition.rs b/crates/ethereum-forks/src/forkcondition.rs index 80c7fff647..89f21221b0 100644 --- a/crates/ethereum-forks/src/forkcondition.rs +++ b/crates/ethereum-forks/src/forkcondition.rs @@ -108,3 +108,179 @@ impl ForkCondition { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::U256; + + #[test] + fn test_active_at_block() { + // Test if the condition is active at the current block number + let fork_condition = ForkCondition::Block(10); + assert!(fork_condition.active_at_block(10), "The condition should be active at block 10"); + + // Test if the condition is not active at a lower block number + assert!( + !fork_condition.active_at_block(9), + "The condition should not be active at block 9" + ); + + // Test if TTD-based condition with known block activates + let fork_condition = + ForkCondition::TTD { fork_block: Some(10), total_difficulty: U256::from(1000) }; + assert!( + fork_condition.active_at_block(10), + "The TTD condition should be active at block 10" + ); + + // Test if TTD-based condition with unknown block does not activate + let fork_condition = + ForkCondition::TTD { fork_block: None, total_difficulty: U256::from(1000) }; + assert!( + !fork_condition.active_at_block(10), + "The TTD condition should not be active at block 10 with an unknown block number" + ); + } + + #[test] + fn test_transitions_at_block() { + // Test if the condition transitions at the correct block number + let fork_condition = ForkCondition::Block(10); + assert!( + fork_condition.transitions_at_block(10), + "The condition should transition at block 10" + ); + + // Test if the condition does not transition at a different block number + assert!( + !fork_condition.transitions_at_block(9), + "The condition should not transition at a different block number" + ); + assert!( + !fork_condition.transitions_at_block(11), + "The condition should not transition at a different block number" + ); + } + + #[test] + fn test_active_at_ttd() { + // Test if the condition activates at the correct total difficulty + let fork_condition = + ForkCondition::TTD { fork_block: Some(10), total_difficulty: U256::from(1000) }; + assert!( + fork_condition.active_at_ttd(U256::from(1000000), U256::from(100)), + "The TTD condition should be active when the total difficulty matches" + ); + + // Test if the condition does not activate when the total difficulty is lower + 
assert!( + !fork_condition.active_at_ttd(U256::from(900), U256::from(100)), + "The TTD condition should not be active when the total difficulty is lower" + ); + + // Test with a saturated subtraction + assert!( + !fork_condition.active_at_ttd(U256::from(900), U256::from(1000)), + "The TTD condition should not be active when the subtraction saturates" + ); + } + + #[test] + fn test_active_at_timestamp() { + // Test if the condition activates at the correct timestamp + let fork_condition = ForkCondition::Timestamp(12345); + assert!( + fork_condition.active_at_timestamp(12345), + "The condition should be active at timestamp 12345" + ); + + // Test if the condition does not activate at an earlier timestamp + assert!( + !fork_condition.active_at_timestamp(12344), + "The condition should not be active at an earlier timestamp" + ); + } + + #[test] + fn test_transitions_at_timestamp() { + // Test if the condition transitions at the correct timestamp + let fork_condition = ForkCondition::Timestamp(12345); + assert!( + fork_condition.transitions_at_timestamp(12345, 12344), + "The condition should transition at timestamp 12345" + ); + + // Test if the condition does not transition if the parent timestamp is already the same + assert!( + !fork_condition.transitions_at_timestamp(12345, 12345), + "The condition should not transition if the parent timestamp is already 12345" + ); + // Test with earlier timestamp + assert!( + !fork_condition.transitions_at_timestamp(123, 122), + "The condition should not transition if the parent timestamp is earlier" + ); + } + + #[test] + fn test_active_at_head() { + let head = Head { + hash: Default::default(), + number: 10, + timestamp: 12345, + total_difficulty: U256::from(1000), + difficulty: U256::from(100), + }; + + // Test if the condition activates based on block number + let fork_condition = ForkCondition::Block(10); + assert!( + fork_condition.active_at_head(&head), + "The condition should be active at the given head block number" + ); + let fork_condition = ForkCondition::Block(11); + assert!( + !fork_condition.active_at_head(&head), + "The condition should not be active at the given head block number" + ); + + // Test if the condition activates based on timestamp + let fork_condition = ForkCondition::Timestamp(12345); + assert!( + fork_condition.active_at_head(&head), + "The condition should be active at the given head timestamp" + ); + let fork_condition = ForkCondition::Timestamp(12346); + assert!( + !fork_condition.active_at_head(&head), + "The condition should not be active at the given head timestamp" + ); + + // Test if the condition activates based on total difficulty and block number + let fork_condition = + ForkCondition::TTD { fork_block: Some(9), total_difficulty: U256::from(900) }; + assert!( + fork_condition.active_at_head(&head), + "The condition should be active at the given head total difficulty" + ); + let fork_condition = + ForkCondition::TTD { fork_block: None, total_difficulty: U256::from(900) }; + assert!( + fork_condition.active_at_head(&head), + "The condition should be active at the given head total difficulty as the block number is unknown" + ); + let fork_condition = + ForkCondition::TTD { fork_block: Some(11), total_difficulty: U256::from(900) }; + assert!( + fork_condition.active_at_head(&head), + "The condition should be active as the total difficulty is higher" + ); + let fork_condition = + ForkCondition::TTD { fork_block: Some(10), total_difficulty: U256::from(9000) }; + assert!( + fork_condition.active_at_head(&head), + "The 
condition should be active because its fork block matches the head block number, even though the head total difficulty is below the configured TTD" + ); + } +} diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 49ca114843..0faea9e280 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -3,7 +3,6 @@ //! Previous version of the Apache licensed [`ethereum-forkid`](https://crates.io/crates/ethereum-forkid). use crate::Head; -#[cfg(not(feature = "std"))] use alloc::{ collections::{BTreeMap, BTreeSet}, vec::Vec, }; @@ -22,8 +21,6 @@ use crc::*; use proptest_derive::Arbitrary as PropTestArbitrary; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use std::collections::{BTreeMap, BTreeSet}; const CRC_32_IEEE: Crc<u32> = Crc::<u32>::new(&CRC_32_ISO_HDLC); const TIMESTAMP_BEFORE_ETHEREUM_MAINNET: u64 = 1_300_000_000; diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs index 6ba54a4217..11e4f764e7 100644 --- a/crates/ethereum-forks/src/hardfork/dev.rs +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -1,10 +1,8 @@ use crate::{ChainHardforks, EthereumHardfork, ForkCondition}; +use alloc::vec; use alloy_primitives::U256; use once_cell::sync::Lazy; -#[cfg(not(feature = "std"))] -use alloc::vec; - /// Dev hardforks pub static DEV_HARDFORKS: Lazy<ChainHardforks> = Lazy::new(|| { ChainHardforks::new(vec![ diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs index 650e88ad0d..3d85b54a96 100644 --- a/crates/ethereum-forks/src/hardfork/ethereum.rs +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -1,4 +1,5 @@ use crate::{hardfork, ChainHardforks, ForkCondition, Hardfork}; +use alloc::{boxed::Box, format, string::String}; use alloy_chains::Chain; use alloy_primitives::{uint, U256}; use core::{ @@ -9,9 +10,6 @@ use core::{ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, string::String}; - hardfork!( /// The name of an Ethereum hardfork. EthereumHardfork { diff --git a/crates/ethereum-forks/src/hardfork/optimism.rs b/crates/ethereum-forks/src/hardfork/optimism.rs index ae42eef128..87c42f1c01 100644 --- a/crates/ethereum-forks/src/hardfork/optimism.rs +++ b/crates/ethereum-forks/src/hardfork/optimism.rs @@ -1,4 +1,3 @@ -#[cfg(not(feature = "std"))] use alloc::{boxed::Box, format, string::String, vec}; use core::{ any::Any, @@ -16,7 +15,8 @@ use crate::{hardfork, ChainHardforks, EthereumHardfork, ForkCondition, Hardfork} hardfork!( /// The name of an optimism hardfork. /// - /// When building a list of hardforks for a chain, it's still expected to mix with [`EthereumHardfork`]. + /// When building a list of hardforks for a chain, it's still expected to mix with + /// [`EthereumHardfork`]. OptimismHardfork { /// Bedrock: . Bedrock, diff --git a/crates/ethereum-forks/src/hardforks/mod.rs b/crates/ethereum-forks/src/hardforks/mod.rs index aa58890421..f62de2d7f3 100644 --- a/crates/ethereum-forks/src/hardforks/mod.rs +++ b/crates/ethereum-forks/src/hardforks/mod.rs @@ -16,7 +16,8 @@ use rustc_hash::FxHashMap; use std::collections::hash_map::Entry; #[cfg(not(feature = "std"))] -use alloc::{boxed::Box, collections::btree_map::Entry, vec::Vec}; +use alloc::collections::btree_map::Entry; +use alloc::{boxed::Box, vec::Vec}; /// Generic trait over a set of ordered hardforks pub trait Hardforks: Default + Clone { @@ -71,7 +72,7 @@ impl ChainHardforks { /// Retrieves [`ForkCondition`] from `fork`.
If `fork` is not present, returns /// [`ForkCondition::Never`]. pub fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition { - self.get(fork).unwrap_or(ForkCondition::Never) + self.get(fork).unwrap_or_default() } /// Retrieves [`ForkCondition`] from `fork` if it exists, otherwise `None`. diff --git a/crates/ethereum-forks/src/hardforks/optimism.rs b/crates/ethereum-forks/src/hardforks/optimism.rs index e6010451f7..54fcff33c6 100644 --- a/crates/ethereum-forks/src/hardforks/optimism.rs +++ b/crates/ethereum-forks/src/hardforks/optimism.rs @@ -8,6 +8,21 @@ pub trait OptimismHardforks: EthereumHardforks { self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) } + /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Fjord`](OptimismHardfork::Fjord) is active at given block timestamp. + fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. + fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) + } + /// Convenience method to check if [`OptimismHardfork::Wright`] is active at a given block /// timestamp. fn is_wright_active_at_timestamp(&self, timestamp: u64) -> bool { diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index 98ff7e36a3..95008a52c4 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -15,7 +15,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; mod display; diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index c5a7e60d57..5997a3add6 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -19,5 +19,4 @@ alloy-genesis.workspace = true eyre.workspace = true shellexpand.workspace = true -serde_json.workspace = true -clap = { workspace = true, features = ["derive", "env"] } \ No newline at end of file +serde_json.workspace = true \ No newline at end of file diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs index a092994ae9..cbbafe490a 100644 --- a/crates/ethereum/cli/src/chainspec.rs +++ b/crates/ethereum/cli/src/chainspec.rs @@ -1,8 +1,7 @@ use alloy_genesis::Genesis; -use clap::{builder::TypedValueParser, error::Result, Arg, Command}; use reth_chainspec::{ChainSpec, DEV, HOLESKY, MAINNET, SEPOLIA}; use reth_cli::chainspec::ChainSpecParser; -use std::{ffi::OsStr, fs, path::PathBuf, sync::Arc}; +use std::{fs, path::PathBuf, sync::Arc}; /// Clap value parser for [`ChainSpec`]s.
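///
/// With the associated-type form of `ChainSpecParser` introduced below, call
/// sites select the parser type rather than a generic target; a sketch
/// (hypothetical caller, mirroring the updated tests):
/// let spec = <EthChainSpecParser as ChainSpecParser>::parse("mainnet")?;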
/// @@ -40,7 +39,9 @@ fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { #[derive(Debug, Clone, Default)] pub struct EthChainSpecParser; -impl ChainSpecParser for EthChainSpecParser { +impl ChainSpecParser for EthChainSpecParser { + type ChainSpec = ChainSpec; + const SUPPORTED_CHAINS: &'static [&'static str] = &["mainnet", "sepolia", "holesky", "dev"]; fn parse(s: &str) -> eyre::Result> { @@ -48,43 +49,68 @@ impl ChainSpecParser for EthChainSpecParser { } } -impl TypedValueParser for EthChainSpecParser { - type Value = Arc; - - fn parse_ref( - &self, - _cmd: &Command, - arg: Option<&Arg>, - value: &OsStr, - ) -> Result { - let val = - value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - >::parse(val).map_err(|err| { - let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned()); - let possible_values = Self::SUPPORTED_CHAINS.join(","); - let msg = format!( - "Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]" - ); - clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg) - }) - } - - fn possible_values( - &self, - ) -> Option + '_>> { - let values = Self::SUPPORTED_CHAINS.iter().map(clap::builder::PossibleValue::new); - Some(Box::new(values)) - } -} - #[cfg(test)] mod tests { use super::*; + use reth_chainspec::EthereumHardforks; #[test] fn parse_known_chain_spec() { for &chain in EthChainSpecParser::SUPPORTED_CHAINS { - assert!(>::parse(chain).is_ok()); + assert!(::parse(chain).is_ok()); } } + + #[test] + fn parse_raw_chainspec_hardforks() { + let s = r#"{ + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "uncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "coinbase": "0x0000000000000000000000000000000000000000", + "stateRoot": "0x76f118cb05a8bc558388df9e3b4ad66ae1f17ef656e5308cb8f600717251b509", + "transactionsTrie": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptTrie": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "bloom": "0x000...000", + "difficulty": "0x00", + "number": "0x00", + "gasLimit": "0x016345785d8a0000", + "gasUsed": "0x00", + "timestamp": "0x01", + "extraData": "0x00", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "baseFeePerGas": "0x07", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "blobGasUsed": "0x00", + "excessBlobGas": "0x00", + "parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "requestsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "hash": "0xc20e1a771553139cdc77e6c3d5f64a7165d972d327eee9632c9c7d0fe839ded4", + "alloc": {}, + "config": { + "ethash": {}, + "chainId": 1, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "terminalTotalDifficulty": 0, + "shanghaiTime": 0, + "cancunTime": 0, + "pragueTime": 0 + } +}"#; + + let spec = ::parse(s).unwrap(); + assert!(spec.hardforks.is_shanghai_active_at_timestamp(0)); + assert!(spec.hardforks.is_cancun_active_at_timestamp(0)); + assert!(spec.hardforks.is_prague_active_at_timestamp(0)); + } } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 6e4ca4825b..2c93958e8f 100644 --- 
a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -22,6 +22,9 @@ use reth_primitives::{ }; use std::{sync::Arc, time::SystemTime}; +/// The bound divisor of the gas limit, used in update calculations. +const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; + mod validation; pub use validation::validate_block_post_execution; @@ -43,7 +46,7 @@ impl EthBeaconConsensus { /// Checks the gas limit for consistency between parent and self headers. /// /// The maximum allowable difference between self and parent gas limits is determined by the - /// parent's gas limit divided by the elasticity multiplier (1024). + /// parent's gas limit divided by the [`GAS_LIMIT_BOUND_DIVISOR`]. fn validate_against_parent_gas_limit( &self, header: &SealedHeader, @@ -62,7 +65,7 @@ impl EthBeaconConsensus { // Check for an increase in gas limit beyond the allowed threshold. if header.gas_limit > parent_gas_limit { - if header.gas_limit - parent_gas_limit >= parent_gas_limit / 1024 { + if header.gas_limit - parent_gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { return Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit, child_gas_limit: header.gas_limit, @@ -70,7 +73,8 @@ impl EthBeaconConsensus { } } // Check for a decrease in gas limit beyond the allowed threshold. - else if parent_gas_limit - header.gas_limit >= parent_gas_limit / 1024 { + else if parent_gas_limit - header.gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR + { return Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit, child_gas_limit: header.gas_limit, @@ -230,7 +234,7 @@ mod tests { #[test] fn test_valid_gas_limit_increase() { - let parent = header_with_gas_limit(1024 * 10); + let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit(parent.gas_limit + 5); assert_eq!( @@ -254,8 +258,10 @@ mod tests { #[test] fn test_invalid_gas_limit_increase_exceeding_limit() { - let parent = header_with_gas_limit(1024 * 10); - let child = header_with_gas_limit(parent.gas_limit + parent.gas_limit / 1024 + 1); + let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); + let child = header_with_gas_limit( + parent.gas_limit + parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1, + ); assert_eq!( EthBeaconConsensus::new(Arc::new(ChainSpec::default())) @@ -269,7 +275,7 @@ mod tests { #[test] fn test_valid_gas_limit_decrease_within_limit() { - let parent = header_with_gas_limit(1024 * 10); + let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit(parent.gas_limit - 5); assert_eq!( @@ -281,8 +287,10 @@ mod tests { #[test] fn test_invalid_gas_limit_decrease_exceeding_limit() { - let parent = header_with_gas_limit(1024 * 10); - let child = header_with_gas_limit(parent.gas_limit - parent.gas_limit / 1024 - 1); + let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); + let child = header_with_gas_limit( + parent.gas_limit - parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1, + ); assert_eq!( EthBeaconConsensus::new(Arc::new(ChainSpec::default())) diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index 8a1f258089..8785b96f63 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -13,14 +13,13 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-evm-ethereum.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = 
true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true -revm-primitives.workspace = true alloy-rlp.workspace = true +reth-chain-state.workspace = true # misc serde.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index fe4a050fa4..5554beea31 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum specific +//! Ethereum specific engine API types and impls. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 45514d4d44..c3edab111f 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,13 +1,9 @@ //! Contains types required for building a payload. use alloy_rlp::Encodable; -use reth_chainspec::ChainSpec; -use reth_evm_ethereum::revm_spec_by_timestamp_after_merge; +use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{ constants::EIP1559_INITIAL_BASE_FEE, Address, BlobTransactionSidecar, EthereumHardfork, Header, SealedBlock, Withdrawals, B256, U256, }; +use reth_primitives::{Address, BlobTransactionSidecar, SealedBlock, Withdrawals, B256, U256}; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes, PayloadId, @@ -16,7 +12,6 @@ use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, convert_block_to_payload_field_v2, }; -use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; /// Contains the built payload. @@ -30,6 +25,8 @@ pub struct EthBuiltPayload { pub(crate) id: PayloadId, /// The built block pub(crate) block: SealedBlock, + /// Block execution data for the payload, if any. + pub(crate) executed_block: Option<ExecutedBlock>, /// The fees of the block pub(crate) fees: U256, /// The blobs, proofs, and commitments in the block. If the block is pre-cancun, this will be @@ -41,8 +38,13 @@ impl EthBuiltPayload { /// Initializes the payload with the given initial block. - pub const fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self { - Self { id, block, fees, sidecars: Vec::new() } + pub const fn new( + id: PayloadId, + block: SealedBlock, + fees: U256, + executed_block: Option<ExecutedBlock>, + ) -> Self { + Self { id, block, executed_block, fees, sidecars: Vec::new() } + } /// Returns the identifier of the payload.
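// Sketch of the widened constructor (placeholder values: `block` is a
// SealedBlock, `executed` the ExecutedBlock retained for the in-memory tree):
//     let payload = EthBuiltPayload::new(id, block, fees, Some(executed));
//     let kept = payload.executed_block(); // handed back to the engine by clone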
@@ -79,6 +81,10 @@ impl BuiltPayload for EthBuiltPayload { fn fees(&self) -> U256 { self.fees } + + fn executed_block(&self) -> Option<ExecutedBlock> { + self.executed_block.clone() + } } impl<'a> BuiltPayload for &'a EthBuiltPayload { @@ -89,6 +95,10 @@ impl<'a> BuiltPayload for &'a EthBuiltPayload { fn fees(&self) -> U256 { (**self).fees() } + + fn executed_block(&self) -> Option<ExecutedBlock> { + self.executed_block.clone() + } } // V1 engine_getPayloadV1 response @@ -234,65 +244,6 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { fn withdrawals(&self) -> &Withdrawals { &self.withdrawals } - - fn cfg_and_block_env( - &self, - chain_spec: &ChainSpec, - parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { - // configure evm env based on parent block - let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); - - // ensure we're not missing any timestamp based hardforks - let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp()); - - // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is - // cancun now, we need to set the excess blob gas to the default value - let blob_excess_gas_and_price = parent - .next_block_excess_blob_gas() - .or_else(|| { - if spec_id == SpecId::CANCUN { - // default excess blob gas is zero - Some(0) - } else { - None - } - }) - .map(BlobExcessGasAndPrice::new); - - let mut basefee = - parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(self.timestamp())); - - let mut gas_limit = U256::from(parent.gas_limit); - - // If we are on the London fork boundary, we need to multiply the parent's gas limit by the - // elasticity multiplier to get the new gas limit. - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(parent.number + 1) { - let elasticity_multiplier = - chain_spec.base_fee_params_at_timestamp(self.timestamp()).elasticity_multiplier; - - // multiply the gas limit by the elasticity multiplier - gas_limit *= U256::from(elasticity_multiplier); - - // set the base fee to the initial base fee from the EIP-1559 spec - basefee = Some(EIP1559_INITIAL_BASE_FEE) - } - - let block_env = BlockEnv { - number: U256::from(parent.number + 1), - coinbase: self.suggested_fee_recipient(), - timestamp: U256::from(self.timestamp()), - difficulty: U256::ZERO, - prevrandao: Some(self.prev_randao()), - gas_limit, - // calculate basefee based on parent block's gas usage - basefee: basefee.map(U256::from).unwrap_or_default(), - // calculate excess gas based on parent block's blob gas usage - blob_excess_gas_and_price, - }; - - (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) - } } /// Generates the payload id for the configured payload from the [`PayloadAttributes`].
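///
/// The id is a truncated hash over the parent hash and the attribute fields,
/// so the same (parent, attributes) pair always maps to the same `PayloadId`
/// and repeated requests can be deduplicated, e.g. (hypothetical values):
/// assert_eq!(payload_id(&parent, &attrs), payload_id(&parent, &attrs));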
@@ -322,94 +273,10 @@ pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> Paylo #[cfg(test)] mod tests { use super::*; - use reth_primitives::Genesis; #[test] - fn ensure_first_london_block_base_fee_is_set() { - let hive_london = r#" -{ - "config": { - "ethash": {}, - "chainId": 7, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "muirGlacierBlock": 0, - "berlinBlock": 0, - "londonBlock": 1, - "mergeNetsplitBlock": 1, - "terminalTotalDifficulty": 196608, - "shanghaiTime": 4662, - "cancunTime": 4662 - }, - "nonce": "0x0", - "timestamp": "0x1234", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000658bdf435d810c91414ec09147daa6db624063790000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x2fefd8", - "difficulty": "0x30000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase": "0x0000000000000000000000000000000000000000", - "alloc": { - "0000000000000000000000000000000000000314": { - "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063a223e05d1461006a578063abd1a0cf1461008d578063abfced1d146100d4578063e05c914a14610110578063e6768b451461014c575b610000565b346100005761007761019d565b6040518082815260200191505060405180910390f35b34610000576100be600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506101a3565b6040518082815260200191505060405180910390f35b346100005761010e600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506101ed565b005b346100005761014a600480803590602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610236565b005b346100005761017960048080359060200190919080359060200190919080359060200190919050506103c4565b60405180848152602001838152602001828152602001935050505060405180910390f35b60005481565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205490505b919050565b80600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505b5050565b7f6031a8d62d7c95988fa262657cd92107d90ed96e08d8f867d32f26edfe85502260405180905060405180910390a17f47e2689743f14e97f7dcfa5eec10ba1dff02f83b3d1d4b9c07b206cbbda66450826040518082815260200191505060405180910390a1817fa48a6b249a5084126c3da369fbc9b16827ead8cb5cdc094b717d3f1dcd995e2960405180905060405180910390a27f7890603b316f3509577afd111710f9ebeefa15e12f72347d9dffd0d65ae3bade81604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a18073ffffffffffffffffffffffffffffffffffffffff167f7efef9ea3f60ddc038e50cccec621f86a0195894dc0520482abf8b5c6b659e4160405180905060405180910390a28181604051808381526020018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019250505060405180910390a05b5050565b6000600060008585859250925092505b935093509390505600a165627a7a72305820aaf842d0d0c35c45622c5263cbb54813d2974d3999c8c38551d7c613ea2bc1170029", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000001234", - "0x6661e9d6d8b923d5bbaab1b96e1dd51ff6ea2a93520fdc9eb75d059238b8c5e9": 
"0x0000000000000000000000000000000000000000000000000000000000000001" - }, - "balance": "0x0" - }, - "0000000000000000000000000000000000000315": { - "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063ef2769ca1461003e575b610000565b3461000057610078600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061007a565b005b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f1935050505015610106578173ffffffffffffffffffffffffffffffffffffffff167f30a3c50752f2552dcc2b93f5b96866280816a986c0c0408cb6778b9fa198288f826040518082815260200191505060405180910390a25b5b50505600a165627a7a72305820637991fabcc8abad4294bf2bb615db78fbec4edff1635a2647d3894e2daf6a610029", - "balance": "0x9999999999999999999999999999999" - }, - "0000000000000000000000000000000000000316": { - "code": "0x444355", - "balance": "0x0" - }, - "0000000000000000000000000000000000000317": { - "code": "0x600160003555", - "balance": "0x0" - }, - "000f3df6d732807ef1319fb7b8bb8522d0beac02": { - "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500", - "balance": "0x0", - "nonce": "0x1" - } - }, - "number": "0x0", - "gasUsed": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas": null, - "excessBlobGas": null, - "blobGasUsed": null -} - "#; - + fn attributes_serde() { let attributes = r#"{"timestamp":"0x1235","prevRandao":"0xf343b00e02dc34ec0124241f74f32191be28fb370bb48060f5fa4df99bda774c","suggestedFeeRecipient":"0x0000000000000000000000000000000000000000","withdrawals":null,"parentBeaconBlockRoot":null}"#; - let attributes: PayloadAttributes = serde_json::from_str(attributes).unwrap(); - - // check that it deserializes properly - let genesis: Genesis = serde_json::from_str(hive_london).unwrap(); - let chainspec = ChainSpec::from(genesis); - let payload_builder_attributes = - EthPayloadBuilderAttributes::new(chainspec.genesis_hash(), attributes); - - // use cfg_and_block_env - let cfg_and_block_env = - payload_builder_attributes.cfg_and_block_env(&chainspec, &chainspec.genesis_header()); - - // ensure the base fee is non zero - assert_eq!(cfg_and_block_env.1.basefee, U256::from(EIP1559_INITIAL_BASE_FEE)); - - // ensure the gas limit is double the previous block's gas limit - assert_eq!( - cfg_and_block_env.1.gas_limit, - U256::from(chainspec.genesis_header().gas_limit * 2) - ); + let _attributes: PayloadAttributes = serde_json::from_str(attributes).unwrap(); } } diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index 77082b1f7d..e5253307b3 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -1,9 +1,11 @@ use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_ethereum_forks::{EthereumHardfork, Head}; -/// Returns the spec id at the given timestamp. +/// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. /// -/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// # Note +/// +/// This is only intended to be used after the merge, when hardforks are activated by /// timestamp. pub fn revm_spec_by_timestamp_after_merge( chain_spec: &ChainSpec, @@ -20,7 +22,7 @@ pub fn revm_spec_by_timestamp_after_merge( } } -/// return `revm_spec` from spec configuration. 
+/// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { revm_primitives::PRAGUE diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index 722c38da76..605427276a 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,4 +1,5 @@ //! EIP-6110 deposit requests parsing +use alloc::{string::ToString, vec::Vec}; use alloy_eips::eip6110::{DepositRequest, MAINNET_DEPOSIT_CONTRACT_ADDRESS}; use alloy_sol_types::{sol, SolEvent}; use reth_chainspec::ChainSpec; @@ -6,9 +7,6 @@ use reth_evm::execute::BlockValidationError; use reth_primitives::{Receipt, Request}; use revm_primitives::Log; -#[cfg(not(feature = "std"))] -use alloc::{string::ToString, vec::Vec}; - sol! { #[allow(missing_docs)] event DepositEvent( diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 19c24fbade..4121163430 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,6 +4,7 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; +use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; use reth_ethereum_consensus::validate_block_post_execution; @@ -24,21 +25,22 @@ use reth_primitives::{ }; use reth_prune_types::PruneModes; use reth_revm::{ - batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, - state_change::post_block_balance_increments, Evm, State, + batch::BlockBatchRecord, + db::{ + states::{bundle_state::BundleRetention, StorageSlot}, + BundleAccount, State, + }, + state_change::post_block_balance_increments, + Evm, }; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, EvmState, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ResultAndState, }; +use std::collections::hash_map::Entry; use tokio::sync::mpsc::UnboundedSender; use tracing::debug; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; -#[cfg(feature = "std")] -use std::sync::Arc; - /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] pub struct EthExecutorProvider { @@ -49,7 +51,7 @@ pub struct EthExecutorProvider { impl EthExecutorProvider { /// Creates a new default ethereum executor provider. pub fn ethereum(chain_spec: Arc) -> Self { - Self::new(chain_spec, Default::default()) + Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) } /// Returns a new provider for the mainnet. @@ -67,7 +69,7 @@ impl EthExecutorProvider { impl EthExecutorProvider where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>, { fn eth_executor( &self, @@ -104,7 +106,7 @@ where impl<EvmConfig> BlockExecutorProvider for EthExecutorProvider<EvmConfig> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>
, { type Executor<DB: Database<Error: Into<ProviderError> + Display>> = EthBlockExecutor<EvmConfig, DB>; @@ -151,7 +153,7 @@ struct EthEvmExecutor { impl<EvmConfig> EthEvmExecutor<EvmConfig> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>
, { /// Executes the transactions in the block and returns the receipts of the transactions in the /// block, the total gas used and the list of EIP-7685 [requests](Request). @@ -210,13 +212,7 @@ where // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { - let new_err = match err { - EVMError::Transaction(e) => EVMError::Transaction(e), - EVMError::Header(e) => EVMError::Header(e), - EVMError::Database(e) => EVMError::Database(e.into()), - EVMError::Custom(e) => EVMError::Custom(e), - EVMError::Precompile(e) => EVMError::Precompile(e), - }; + let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), @@ -318,7 +314,7 @@ impl EthBlockExecutor { impl EthBlockExecutor where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>, DB: Database<Error: Into<ProviderError> + Display>, { /// Configures a new evm configuration and block environment for the given block. @@ -332,7 +328,7 @@ where self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, - self.chain_spec(), header, total_difficulty, ); @@ -408,7 +403,7 @@ where impl<EvmConfig, DB> Executor<DB> for EthBlockExecutor<EvmConfig, DB> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>
<Header = Header>, DB: Database<Error: Into<ProviderError> + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; @@ -438,6 +433,94 @@ where } } +/// An executor that retains all cache state from execution in its bundle state. +#[derive(Debug)] +pub struct BlockAccessListExecutor<EvmConfig, DB> { + /// The executor used to execute single blocks + /// + /// All state changes are committed to the [State]. + executor: EthBlockExecutor<EvmConfig, DB>, +} + +impl<EvmConfig, DB> Executor<DB> for BlockAccessListExecutor<EvmConfig, DB> +where + EvmConfig: ConfigureEvm<Header = Header>
, + DB: Database<Error: Into<ProviderError> + Display>, { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; + type Output = BlockExecutionOutput<Receipt>; + type Error = BlockExecutionError; + + /// Executes the block and commits the changes to the internal state. + /// + /// Returns the receipts of the transactions in the block. + /// + /// This also returns the accounts from the internal state cache in the bundle state, allowing + /// access to not only the state that changed during execution, but also the state accessed + /// during execution. + /// + /// Returns an error if the block could not be executed or failed verification. + fn execute(mut self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error> { + let BlockExecutionInput { block, total_difficulty, .. } = input; + let EthExecuteOutput { receipts, requests, gas_used } = + self.executor.execute_without_verification(block, total_difficulty)?; + + // NOTE: we need to merge the transitions while keeping the reverts for the bundle retention + self.executor.state.merge_transitions(BundleRetention::Reverts); + + // now, ensure each account from the state is included in the bundle state + let mut bundle_state = self.executor.state.take_bundle(); + for (address, account) in self.executor.state.cache.accounts { + // convert all slots, insert all slots + let account_info = account.account_info(); + let account_storage = account.account.map(|a| a.storage).unwrap_or_default(); + + match bundle_state.state.entry(address) { + Entry::Vacant(entry) => { + // we have to add the entire account here + let extracted_storage = account_storage + .into_iter() + .map(|(k, v)| { + (k, StorageSlot { previous_or_original_value: v, present_value: v }) + }) + .collect(); + + let bundle_account = BundleAccount { + info: account_info.clone(), + original_info: account_info, + storage: extracted_storage, + status: account.status, + }; + entry.insert(bundle_account); + } + Entry::Occupied(mut entry) => { + // only add slots that are unchanged + let current_account = entry.get_mut(); + + // iterate over all storage slots, checking keys that are not in the bundle + // state + for (k, v) in account_storage { + if let Entry::Vacant(storage_entry) = current_account.storage.entry(k) { + storage_entry.insert(StorageSlot { + previous_or_original_value: v, + present_value: v, + }); + } + } + } + } + } + + Ok(BlockExecutionOutput { + state: bundle_state, + receipts, + requests, + gas_used, + snapshot: None, + }) + } +} + /// An executor for a batch of blocks. /// /// State changes are tracked until the executor is finalized. @@ -461,7 +544,7 @@ impl EthBatchExecutor { impl<EvmConfig, DB> BatchExecutor<DB> for EthBatchExecutor<EvmConfig, DB> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>
, DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; @@ -574,7 +657,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { chain_spec, evm_config: Default::default() } + EthExecutorProvider { evm_config: EthEvmConfig::new(chain_spec.clone()), chain_spec } } #[test] @@ -790,7 +873,7 @@ mod tests { .build(), ); - let mut header = chain_spec.genesis_header(); + let mut header = chain_spec.genesis_header().clone(); let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); @@ -1020,7 +1103,7 @@ mod tests { .build(), ); - let header = chain_spec.genesis_header(); + let header = chain_spec.genesis_header().clone(); let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); @@ -1197,7 +1280,7 @@ mod tests { .build(), ); - let mut header = chain_spec.genesis_header(); + let mut header = chain_spec.genesis_header().clone(); header.requests_root = Some(EMPTY_ROOT_HASH); let header_hash = header.hash_slow(); @@ -1361,7 +1444,7 @@ mod tests { let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); assert_eq!(input.len(), 56); - let mut header = chain_spec.genesis_header(); + let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; header.gas_used = 134_807; header.receipts_root = @@ -1452,7 +1535,7 @@ mod tests { assert_eq!(input.len(), 56); // Create a genesis block header with a specified gas limit and gas used - let mut header = chain_spec.genesis_header(); + let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; header.gas_used = 134_807; header.receipts_root = diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index bead8ae392..df6ac8bcc2 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -9,19 +9,22 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; +use alloc::vec::Vec; use reth_chainspec::{ChainSpec, Head}; -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_primitives::{transaction::FillTxEnv, Address, Header, TransactionSigned, U256}; -use revm_primitives::{AnalysisKind, Bytes, CfgEnvWithHandlerCfg, Env, TxEnv, TxKind}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +use revm_primitives::{ + AnalysisKind, BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, CfgEnvWithHandlerCfg, Env, + SpecId, TxEnv, TxKind, +}; +use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; +use reth_ethereum_forks::EthereumHardfork; +use reth_primitives::constants::EIP1559_INITIAL_BASE_FEE; pub mod execute; @@ -32,34 +35,25 @@ pub mod dao_fork; pub mod eip6110; /// Ethereum-related EVM configuration. 
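// Construction now goes through `EthEvmConfig::new` with an explicit chain
// spec instead of `Default`, as in the updated tests below:
//     let evm_config = EthEvmConfig::new(MAINNET.clone());
//     let _spec = evm_config.chain_spec(); // the spec travels with the config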
-#[derive(Debug, Clone, Copy, Default)] -#[non_exhaustive] -pub struct EthEvmConfig; - -impl ConfigureEvmEnv for EthEvmConfig { - fn fill_cfg_env( - &self, - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = config::revm_spec( - chain_spec, - &Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); +#[derive(Debug, Clone)] +pub struct EthEvmConfig { + chain_spec: Arc, +} - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; +impl EthEvmConfig { + /// Creates a new Ethereum EVM configuration with the given chain spec. + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } + } - cfg_env.handler_cfg.spec_id = spec_id; + /// Returns the chain spec associated with this configuration. + pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec } +} + +impl ConfigureEvmEnv for EthEvmConfig { + type Header = Header; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -104,6 +98,91 @@ impl ConfigureEvmEnv for EthEvmConfig { // disable the base fee check for this call by setting the base fee to zero env.block.basefee = U256::ZERO; } + + fn fill_cfg_env( + &self, + cfg_env: &mut CfgEnvWithHandlerCfg, + header: &Header, + total_difficulty: U256, + ) { + let spec_id = config::revm_spec( + self.chain_spec(), + &Head { + number: header.number, + timestamp: header.timestamp, + difficulty: header.difficulty, + total_difficulty, + hash: Default::default(), + }, + ); + + cfg_env.chain_id = self.chain_spec.chain().id(); + cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; + + cfg_env.handler_cfg.spec_id = spec_id; + } + + fn next_cfg_and_block_env( + &self, + parent: &Self::Header, + attributes: NextBlockEnvAttributes, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + // configure evm env based on parent block + let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); + + // ensure we're not missing any timestamp based hardforks + let spec_id = revm_spec_by_timestamp_after_merge(&self.chain_spec, attributes.timestamp); + + // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is + // cancun now, we need to set the excess blob gas to the default value + let blob_excess_gas_and_price = parent + .next_block_excess_blob_gas() + .or_else(|| { + if spec_id == SpecId::CANCUN { + // default excess blob gas is zero + Some(0) + } else { + None + } + }) + .map(BlobExcessGasAndPrice::new); + + let mut basefee = parent.next_block_base_fee( + self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), + ); + + let mut gas_limit = U256::from(parent.gas_limit); + + // If we are on the London fork boundary, we need to multiply the parent's gas limit by the + // elasticity multiplier to get the new gas limit. 
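+ // Worked example (illustrative numbers): a parent gas limit of 15,000,000
+ // with an elasticity multiplier of 2 yields a 30,000,000 gas limit for the
+ // first London block, and the base fee is reset to EIP1559_INITIAL_BASE_FEE
+ // rather than derived from the parent header.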
+ if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(parent.number + 1) { + let elasticity_multiplier = self + .chain_spec + .base_fee_params_at_timestamp(attributes.timestamp) + .elasticity_multiplier; + + // multiply the gas limit by the elasticity multiplier + gas_limit *= U256::from(elasticity_multiplier); + + // set the base fee to the initial base fee from the EIP-1559 spec + basefee = Some(EIP1559_INITIAL_BASE_FEE) + } + + let block_env = BlockEnv { + number: U256::from(parent.number + 1), + coinbase: attributes.suggested_fee_recipient, + timestamp: U256::from(attributes.timestamp), + difficulty: U256::ZERO, + prevrandao: Some(attributes.prev_randao), + gas_limit, + // calculate basefee based on parent block's gas usage + basefee: basefee.map(U256::from).unwrap_or_default(), + // calculate excess gas based on parent block's blob gas usage + blob_excess_gas_and_price, + }; + + (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) + } } impl ConfigureEvm for EthEvmConfig { @@ -115,7 +194,7 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use reth_chainspec::{Chain, ChainSpec}; + use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, @@ -155,10 +234,9 @@ mod tests { // Use the `EthEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - EthEvmConfig::default().fill_cfg_and_block_env( + EthEvmConfig::new(Arc::new(chain_spec.clone())).fill_cfg_and_block_env( &mut cfg_env, &mut block_env, - &chain_spec, &header, total_difficulty, ); @@ -172,7 +250,7 @@ mod tests { #[allow(clippy::needless_update)] fn test_evm_configure() { // Create a default `EthEvmConfig` - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); // Initialize an empty database wrapped in CacheDB let db = CacheDB::>::default(); @@ -210,7 +288,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_default_spec() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); @@ -231,7 +309,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_custom_cfg() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); @@ -262,7 +340,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_custom_block_and_tx() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); @@ -296,7 +374,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_spec_id() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); @@ -319,7 +397,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_inspector() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); @@ -361,7 +439,7 @@ mod tests { #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_and_default_inspector() { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); let env_with_handler = EnvWithHandlerCfg::default(); @@ -381,7 +459,7 @@ mod tests { #[test] 
    #[allow(clippy::needless_update)]
    fn test_evm_with_env_inspector_and_custom_cfg() {
-        let evm_config = EthEvmConfig::default();
+        let evm_config = EthEvmConfig::new(MAINNET.clone());
 
         let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
 
         let cfg = CfgEnv::default().with_chain_id(111);
@@ -406,7 +484,7 @@ mod tests {
     #[test]
     #[allow(clippy::needless_update)]
     fn test_evm_with_env_inspector_and_custom_block_tx() {
-        let evm_config = EthEvmConfig::default();
+        let evm_config = EthEvmConfig::new(MAINNET.clone());
 
         let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
 
         // Create custom block and tx environment
@@ -438,7 +516,7 @@ mod tests {
     #[test]
     #[allow(clippy::needless_update)]
     fn test_evm_with_env_inspector_and_spec_id() {
-        let evm_config = EthEvmConfig::default();
+        let evm_config = EthEvmConfig::new(MAINNET.clone());
 
         let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
 
         let handler_cfg = HandlerCfg { spec_id: SpecId::CONSTANTINOPLE, ..Default::default() };
diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml
index e7a5e70295..145a966833 100644
--- a/crates/ethereum/node/Cargo.toml
+++ b/crates/ethereum/node/Cargo.toml
@@ -28,6 +28,7 @@ reth-beacon-consensus.workspace = true
 reth-rpc.workspace = true
 reth-node-api.workspace = true
 reth-chainspec.workspace = true
+reth-primitives.workspace = true
 
 # misc
 eyre.workspace = true
diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs
index ce1498a15f..050f9b9bc1 100644
--- a/crates/ethereum/node/src/node.rs
+++ b/crates/ethereum/node/src/node.rs
@@ -11,16 +11,17 @@ use reth_ethereum_engine_primitives::{
 };
 use reth_evm_ethereum::execute::EthExecutorProvider;
 use reth_network::NetworkHandle;
-use reth_node_api::{FullNodeComponents, NodeAddOns};
+use reth_node_api::{ConfigureEvm, FullNodeComponents, NodeAddOns};
 use reth_node_builder::{
     components::{
         ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder,
         PayloadServiceBuilder, PoolBuilder,
     },
-    node::{FullNodeTypes, NodeTypes},
-    BuilderContext, ConfigureEvm, Node, PayloadBuilderConfig, PayloadTypes,
+    node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine},
+    BuilderContext, Node, PayloadBuilderConfig, PayloadTypes,
 };
 use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
+use reth_primitives::Header;
 use reth_provider::CanonStateSubscriptions;
 use reth_rpc::EthApi;
 use reth_tracing::tracing::{debug, info};
@@ -47,8 +48,8 @@ impl EthereumNode {
             EthereumConsensusBuilder,
         >
     where
-        Node: FullNodeTypes,
-        <Node as NodeTypes>::Engine: PayloadTypes<
+        Node: FullNodeTypes<Types: NodeTypesWithEngine<ChainSpec = ChainSpec>>,
+        <Node::Types as NodeTypesWithEngine>::Engine: PayloadTypes<
             BuiltPayload = EthBuiltPayload,
             PayloadAttributes = EthPayloadAttributes,
             PayloadBuilderAttributes = EthPayloadBuilderAttributes,
@@ -57,7 +58,7 @@ impl EthereumNode {
         ComponentsBuilder::default()
             .node_types::<Node>()
             .pool(EthereumPoolBuilder::default())
-            .payload(EthereumPayloadBuilder::new(EthEvmConfig::default()))
+            .payload(EthereumPayloadBuilder::default())
             .network(EthereumNetworkBuilder::default())
             .executor(EthereumExecutorBuilder::default())
             .consensus(EthereumConsensusBuilder::default())
@@ -66,10 +67,13 @@
 impl NodeTypes for EthereumNode {
     type Primitives = ();
-    type Engine = EthEngineTypes;
     type ChainSpec = ChainSpec;
 }
 
+impl NodeTypesWithEngine for EthereumNode {
+    type Engine = EthEngineTypes;
+}
+
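For readers following the `NodeTypes`/`NodeTypesWithEngine` split above: the engine type moves out of `NodeTypes` into a dedicated subtrait, so components that never touch the engine can bound on the smaller trait. A toy reduction of the pattern, with illustrative stand-in types rather than the actual reth traits:

```rust
// The base trait carries only engine-agnostic associated types.
trait NodeTypes {
    type Primitives;
    type ChainSpec;
}

// Engine-aware components bound on this subtrait instead.
trait NodeTypesWithEngine: NodeTypes {
    type Engine;
}

struct MyNode;

impl NodeTypes for MyNode {
    type Primitives = ();
    type ChainSpec = (); // stands in for a real `ChainSpec`
}

impl NodeTypesWithEngine for MyNode {
    type Engine = (); // stands in for `EthEngineTypes`
}

fn main() {}
```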
 /// Add-ons w.r.t. l1 ethereum.
 #[derive(Debug, Clone)]
 pub struct EthereumAddOns;
 
@@ -78,9 +82,10 @@ impl<N: FullNodeComponents> NodeAddOns<N> for EthereumAddOns {
     type EthApi = EthApi<N::Provider, N::Pool, NetworkHandle, N::Evm>;
 }
 
-impl<N> Node<N> for EthereumNode
+impl<Types, N> Node<N> for EthereumNode
 where
-    N: FullNodeTypes<Engine = EthEngineTypes>,
+    Types: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = ChainSpec>,
+    N: FullNodeTypes<Types = Types>,
 {
     type ComponentsBuilder = ComponentsBuilder<
         N,
@@ -103,9 +108,10 @@
 #[non_exhaustive]
 pub struct EthereumExecutorBuilder;
 
-impl<Node> ExecutorBuilder<Node> for EthereumExecutorBuilder
+impl<Types, Node> ExecutorBuilder<Node> for EthereumExecutorBuilder
 where
-    Node: FullNodeTypes,
+    Types: NodeTypesWithEngine<ChainSpec = ChainSpec>,
+    Node: FullNodeTypes<Types = Types>,
 {
     type EVM = EthEvmConfig;
     type Executor = EthExecutorProvider<Self::EVM>;
 
@@ -115,8 +121,8 @@
         ctx: &BuilderContext<Node>,
     ) -> eyre::Result<(Self::EVM, Self::Executor)> {
         let chain_spec = ctx.chain_spec();
-        let evm_config = EthEvmConfig::default();
-        let executor = EthExecutorProvider::new(chain_spec, evm_config);
+        let evm_config = EthEvmConfig::new(ctx.chain_spec());
+        let executor = EthExecutorProvider::new(chain_spec, evm_config.clone());
 
         Ok((evm_config, executor))
     }
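Note that `build_evm` above now constructs one chain-spec-carrying `EthEvmConfig` and clones it into the executor. A hedged sketch of why that `.clone()` is cheap, using hypothetical stand-in types (the real config holds an `Arc<ChainSpec>`, as shown earlier in this diff):

```rust
use std::sync::Arc;

struct ChainSpec;

#[derive(Clone)]
struct EvmConfig {
    chain_spec: Arc<ChainSpec>,
}

fn main() {
    let spec = Arc::new(ChainSpec);
    let evm_config = EvmConfig { chain_spec: spec };

    // Cloning only bumps the Arc refcount; the EVM config and the executor
    // end up sharing a single chain spec allocation.
    let executor_config = evm_config.clone();
    assert_eq!(Arc::strong_count(&executor_config.chain_spec), 2);
}
```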
@@ -132,9 +138,10 @@ pub struct EthereumPoolBuilder {
     // TODO add options for txpool args
 }
 
-impl<Node> PoolBuilder<Node> for EthereumPoolBuilder
+impl<Types, Node> PoolBuilder<Node> for EthereumPoolBuilder
 where
-    Node: FullNodeTypes,
+    Types: NodeTypesWithEngine<ChainSpec = ChainSpec>,
+    Node: FullNodeTypes<Types = Types>,
 {
     type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>;
 
@@ -198,36 +205,29 @@ where
 /// A basic ethereum payload service.
 #[derive(Debug, Default, Clone)]
 #[non_exhaustive]
-pub struct EthereumPayloadBuilder<Evm = EthEvmConfig> {
-    /// The EVM configuration to use for the payload builder.
-    pub evm_config: Evm,
-}
-
-impl<EVM> EthereumPayloadBuilder<EVM> {
-    /// Create a new instance with the given evm config.
-    pub const fn new(evm_config: EVM) -> Self {
-        Self { evm_config }
-    }
-}
+pub struct EthereumPayloadBuilder;
 
-impl<Node, Evm, Pool> PayloadServiceBuilder<Node, Pool> for EthereumPayloadBuilder<Evm>
-where
-    Node: FullNodeTypes,
-    Evm: ConfigureEvm,
-    Pool: TransactionPool + Unpin + 'static,
-    <Node as NodeTypes>::Engine: PayloadTypes<
-        BuiltPayload = EthBuiltPayload,
-        PayloadAttributes = EthPayloadAttributes,
-        PayloadBuilderAttributes = EthPayloadBuilderAttributes,
-    >,
-{
-    async fn spawn_payload_service(
+impl EthereumPayloadBuilder {
+    /// A helper method initializing [`PayloadBuilderService`] with the given EVM config.
+    pub fn spawn<Types, Node, Evm, Pool>(
         self,
+        evm_config: Evm,
         ctx: &BuilderContext<Node>,
         pool: Pool,
-    ) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> {
+    ) -> eyre::Result<PayloadBuilderHandle<Types::Engine>>
+    where
+        Types: NodeTypesWithEngine<ChainSpec = ChainSpec>,
+        Node: FullNodeTypes<Types = Types>,
+        Evm: ConfigureEvm<Header = Header>,
+        Pool: TransactionPool + Unpin + 'static,
+        Types::Engine: PayloadTypes<
+            BuiltPayload = EthBuiltPayload,
+            PayloadAttributes = EthPayloadAttributes,
+            PayloadBuilderAttributes = EthPayloadBuilderAttributes,
+        >,
+    {
         let payload_builder =
-            reth_ethereum_payload_builder::EthereumPayloadBuilder::new(self.evm_config);
+            reth_ethereum_payload_builder::EthereumPayloadBuilder::new(evm_config);
         let conf = ctx.payload_builder_config();
 
         let payload_job_config = BasicPayloadJobGeneratorConfig::default()
@@ -253,6 +253,26 @@ where
     }
 }
 
+impl<Types, Node, Pool> PayloadServiceBuilder<Node, Pool> for EthereumPayloadBuilder
+where
+    Types: NodeTypesWithEngine<ChainSpec = ChainSpec>,
+    Node: FullNodeTypes<Types = Types>,
+    Pool: TransactionPool + Unpin + 'static,
+    Types::Engine: PayloadTypes<
+        BuiltPayload = EthBuiltPayload,
+        PayloadAttributes = EthPayloadAttributes,
+        PayloadBuilderAttributes = EthPayloadBuilderAttributes,
+    >,
+{
+    async fn spawn_payload_service(
+        self,
+        ctx: &BuilderContext<Node>,
+        pool: Pool,
+    ) -> eyre::Result<PayloadBuilderHandle<Types::Engine>> {
+        self.spawn(EthEvmConfig::new(ctx.chain_spec()), ctx, pool)
+    }
+}
+
 /// A basic ethereum payload service.
 #[derive(Debug, Default, Clone, Copy)]
 pub struct EthereumNetworkBuilder {
@@ -284,7 +304,7 @@ pub struct EthereumConsensusBuilder {
 
 impl<Node> ConsensusBuilder<Node> for EthereumConsensusBuilder
 where
-    Node: FullNodeTypes,
+    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>,
 {
     type Consensus = Arc<dyn reth_consensus::Consensus>;
 
diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs
index 6390dd8549..379f66e814 100644
--- a/crates/ethereum/node/tests/it/builder.rs
+++ b/crates/ethereum/node/tests/it/builder.rs
@@ -6,6 +6,7 @@ use reth_db::{
     test_utils::{create_test_rw_db, TempDatabase},
     DatabaseEnv,
 };
+use reth_node_api::NodeTypesWithDBAdapter;
 use reth_node_builder::{EngineNodeLauncher, FullNodeComponents, NodeBuilder, NodeConfig};
 use reth_node_ethereum::node::{EthereumAddOns, EthereumNode};
 use reth_provider::providers::BlockchainProvider2;
@@ -46,15 +47,22 @@ async fn test_eth_launcher() {
     let tasks = TaskManager::current();
     let config = NodeConfig::test();
     let db = create_test_rw_db();
-    let _builder = NodeBuilder::new(config)
-        .with_database(db)
-        .with_types_and_provider::<EthereumNode, BlockchainProvider2<Arc<TempDatabase<DatabaseEnv>>>>()
-        .with_components(EthereumNode::components())
-        .with_add_ons::<EthereumAddOns>()
-        .launch_with_fn(|builder| {
-            let launcher = EngineNodeLauncher::new(tasks.executor(), builder.config.datadir());
-            builder.launch_with(launcher)
-        });
+    let _builder =
+        NodeBuilder::new(config)
+            .with_database(db)
+            .with_types_and_provider::<EthereumNode, BlockchainProvider2<
+                NodeTypesWithDBAdapter<EthereumNode, Arc<TempDatabase<DatabaseEnv>>>,
+            >>()
+            .with_components(EthereumNode::components())
+            .with_add_ons::<EthereumAddOns>()
+            .launch_with_fn(|builder| {
+                let launcher = EngineNodeLauncher::new(
+                    tasks.executor(),
+                    builder.config.datadir(),
+                    Default::default(),
+                );
+                builder.launch_with(launcher)
+            });
 }
 
 #[test]
diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml
index 4037ca588a..a5f53264c6 100644
--- a/crates/ethereum/payload/Cargo.toml
+++ b/crates/ethereum/payload/Cargo.toml
@@ -18,12 +18,14 @@ reth-revm.workspace = true
 reth-transaction-pool.workspace = true
 reth-provider.workspace = true
 reth-payload-builder.workspace = true
+reth-payload-primitives.workspace = true
 reth-execution-types.workspace = true
 reth-basic-payload-builder.workspace = true
 reth-evm.workspace = true
 reth-evm-ethereum.workspace = true
 reth-errors.workspace = true
 reth-trie.workspace = true
+reth-chain-state.workspace = true
 
 # ethereum
 revm.workspace = true
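A rough sketch of the delegation introduced above, with hypothetical stand-in types: the trait method `spawn_payload_service` simply forwards to `spawn` with the default Ethereum EVM config, while custom nodes can call `spawn` directly with their own config:

```rust
struct EvmConfig(&'static str);

struct PayloadBuilder;

impl PayloadBuilder {
    // Stands in for `EthereumPayloadBuilder::spawn`.
    fn spawn(self, evm_config: EvmConfig) -> Result<String, ()> {
        Ok(format!("payload service running with {}", evm_config.0))
    }

    // Stands in for `PayloadServiceBuilder::spawn_payload_service`.
    fn spawn_payload_service(self) -> Result<String, ()> {
        // The default wiring picks the stock config; custom nodes skip this.
        self.spawn(EvmConfig("default ethereum config"))
    }
}

fn main() {
    assert!(PayloadBuilder.spawn_payload_service().is_ok());
    assert!(PayloadBuilder.spawn(EvmConfig("custom config")).is_ok());
}
```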
diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs
index 49c2c46b7a..b374645cd5 100644
--- a/crates/ethereum/payload/src/lib.rs
+++ b/crates/ethereum/payload/src/lib.rs
@@ -13,6 +13,7 @@ use reth_basic_payload_builder::{
     commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder,
     PayloadConfig, WithdrawalsOutcome,
 };
+use reth_chain_state::ExecutedBlock;
 use reth_errors::RethError;
 use reth_evm::{
     system_calls::{
@@ -20,31 +21,34 @@ use reth_evm::{
         post_block_withdrawal_requests_contract_call, pre_block_beacon_root_contract_call,
         pre_block_blockhashes_contract_call,
     },
-    ConfigureEvm,
+    ConfigureEvm, NextBlockEnvAttributes,
 };
 use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig};
 use reth_execution_types::ExecutionOutcome;
 use reth_payload_builder::{
     error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes,
 };
+use reth_payload_primitives::PayloadBuilderAttributes;
 use reth_primitives::{
-    constants::{
-        eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS,
-    },
+    constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE},
     eip4844::calculate_excess_blob_gas,
     proofs::{self, calculate_requests_root},
+    revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg},
     Block, EthereumHardforks, Header, IntoRecoveredTransaction, Receipt, EMPTY_OMMER_ROOT_HASH,
     U256,
 };
 use reth_provider::StateProviderFactory;
 use reth_revm::database::StateProviderDatabase;
-use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool};
+use reth_transaction_pool::{
+    noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool,
+};
 use reth_trie::HashedPostState;
 use revm::{
     db::states::bundle_state::BundleRetention,
     primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState},
     DatabaseCommit, State,
 };
+use std::sync::Arc;
 use tracing::{debug, trace, warn};
 
 /// Ethereum payload builder
@@ -61,16 +65,30 @@ impl<EvmConfig> EthereumPayloadBuilder<EvmConfig> {
     }
 }
 
-impl Default for EthereumPayloadBuilder {
-    fn default() -> Self {
-        Self::new(EthEvmConfig::default())
+impl<EvmConfig> EthereumPayloadBuilder<EvmConfig>
+where
+    EvmConfig: ConfigureEvm<Header = Header>,
+{
+    /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload
+    /// (that has the `parent` as its parent).
+    fn cfg_and_block_env(
+        &self,
+        config: &PayloadConfig<EthPayloadBuilderAttributes>,
+        parent: &Header,
+    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+        let next_attributes = NextBlockEnvAttributes {
+            timestamp: config.attributes.timestamp(),
+            suggested_fee_recipient: config.attributes.suggested_fee_recipient(),
+            prev_randao: config.attributes.prev_randao(),
+        };
+        self.evm_config.next_cfg_and_block_env(parent, next_attributes)
+    }
 }
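The `cfg_and_block_env` helper above only forwards the CL-controlled fields into `NextBlockEnvAttributes`; everything else is derived from the parent header. A minimal mirror of that mapping, using simplified stand-in types rather than the real reth attributes:

```rust
struct Attributes {
    timestamp: u64,
    suggested_fee_recipient: [u8; 20],
    prev_randao: [u8; 32],
}

struct NextBlockEnvAttributes {
    timestamp: u64,
    suggested_fee_recipient: [u8; 20],
    prev_randao: [u8; 32],
}

// Only the consensus-layer-chosen fields are carried over.
fn to_env_attributes(a: &Attributes) -> NextBlockEnvAttributes {
    NextBlockEnvAttributes {
        timestamp: a.timestamp,
        suggested_fee_recipient: a.suggested_fee_recipient,
        prev_randao: a.prev_randao,
    }
}

fn main() {
    let attrs =
        Attributes { timestamp: 1, suggested_fee_recipient: [0; 20], prev_randao: [0; 32] };
    assert_eq!(to_env_attributes(&attrs).timestamp, 1);
}
```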
 
 // Default implementation of [PayloadBuilder] for unit type
 impl<EvmConfig, Pool, Client> PayloadBuilder<Pool, Client> for EthereumPayloadBuilder<EvmConfig>
 where
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>
, Client: StateProviderFactory, Pool: TransactionPool, { @@ -81,7 +99,8 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - default_ethereum_payload_builder(self.evm_config.clone(), args) + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env) } fn build_empty_payload( @@ -89,173 +108,19 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let extra_data = config.extra_data(); - let PayloadConfig { - initialized_block_env, - parent_block, - attributes, - chain_spec, - initialized_cfg, - .. - } = config; - - debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload"); - - let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to get state for empty payload" - ); - err - })?; - let mut db = State::builder() - .with_database(StateProviderDatabase::new(state)) - .with_bundle_update() - .build(); - - let base_fee = initialized_block_env.basefee.to::(); - let block_gas_limit = - initialized_block_env.gas_limit.try_into().unwrap_or(chain_spec.max_gas_limit); - - // apply eip-4788 pre block contract call - pre_block_beacon_root_contract_call( - &mut db, - &self.evm_config, - &chain_spec, - &initialized_cfg, - &initialized_block_env, - attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for empty payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; - - // apply eip-2935 blockhashes update - pre_block_blockhashes_contract_call( - &mut db, - &self.evm_config, - &chain_spec, - &initialized_cfg, - &initialized_block_env, - parent_block.hash(), - ) - .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for empty payload"); - PayloadBuilderError::Internal(err.into()) - })?; - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &chain_spec, - attributes.timestamp, - attributes.withdrawals.clone(), - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to commit withdrawals for empty payload" - ); - err - })?; - - // merge all transitions into bundle state, this would apply the withdrawal balance - // changes and 4788 contract call - db.merge_transitions(BundleRetention::PlainState); - - // calculate the state root - let bundle_state = db.take_bundle(); - let state_root = db - .database - .state_root(HashedPostState::from_bundle_state(&bundle_state.state)) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for empty payload" - ); - err - })?; - - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } else { - // for the first post-fork block, both parent.blob_gas_used and - // 
parent.excess_blob_gas are evaluated as 0 - Some(calculate_excess_blob_gas(0, 0)) - }; - - blob_gas_used = Some(0); - } - - // Calculate the requests and the requests root. - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { - // We do not calculate the EIP-6110 deposit requests because there are no - // transactions in an empty payload. - let withdrawal_requests = post_block_withdrawal_requests_contract_call( - &self.evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - let consolidation_requests = post_block_consolidation_requests_contract_call( - &self.evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - - let requests = [withdrawal_requests, consolidation_requests].concat(); - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) - } else { - (None, None) - }; - - let header = Header { - parent_hash: parent_block.hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root: EMPTY_TRANSACTIONS, - withdrawals_root, - receipts_root: EMPTY_RECEIPTS, - logs_bloom: Default::default(), - timestamp: attributes.timestamp, - mix_hash: attributes.prev_randao, - nonce: BEACON_NONCE, - base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: 0, - extra_data, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root: attributes.parent_beacon_block_root, - requests_root, + let args = BuildArguments { + client, + config, + // we use defaults here because for the empty payload we don't need to execute anything + pool: NoopTransactionPool::default(), + cached_reads: Default::default(), + cancel: Default::default(), + best_payload: None, }; - - let block = - Block { header, body: vec![], ommers: vec![], withdrawals, sidecars: None, requests }; - let sealed_block = block.seal_slow(); - - Ok(EthBuiltPayload::new(attributes.payload_id(), sealed_block, U256::ZERO)) + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env)? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -265,12 +130,14 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] -pub fn default_ethereum_payload_builder( +pub fn default_ethereum_payload( evm_config: EvmConfig, args: BuildArguments, + initialized_cfg: CfgEnvWithHandlerCfg, + initialized_block_env: BlockEnv, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>,
     Client: StateProviderFactory,
     Pool: TransactionPool,
 {
@@ -280,15 +147,7 @@ where
     let state = StateProviderDatabase::new(state_provider);
     let mut db =
         State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build();
-    let extra_data = config.extra_data();
-    let PayloadConfig {
-        initialized_block_env,
-        initialized_cfg,
-        parent_block,
-        attributes,
-        chain_spec,
-        ..
-    } = config;
+    let PayloadConfig { parent_block, extra_data, attributes, chain_spec } = config;
 
     debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload");
     let mut cumulative_gas_used = 0;
@@ -298,6 +157,7 @@ where
     let base_fee = initialized_block_env.basefee.to::<u64>();
 
     let mut executed_txs = Vec::new();
+    let mut executed_senders = Vec::new();
 
     let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new(
         base_fee,
@@ -444,7 +304,8 @@ where
             .expect("fee is always valid; execution succeeded");
         total_fees += U256::from(miner_fee) * U256::from(gas_used);
 
-        // append transaction to the list of executed transactions
+        // append sender and transaction to the respective lists
+        executed_senders.push(tx.signer());
         executed_txs.push(tx.into_signed());
     }
 
@@ -487,11 +348,11 @@ where
     // merge all transitions into bundle state, this would apply the withdrawal balance changes
     // and 4788 contract call
-    db.merge_transitions(BundleRetention::PlainState);
+    db.merge_transitions(BundleRetention::Reverts);
 
     let execution_outcome = ExecutionOutcome::new(
         db.take_bundle(),
-        vec![receipts].into(),
+        vec![receipts.clone()].into(),
         block_number,
         vec![requests.clone().unwrap_or_default()],
     );
@@ -500,11 +361,16 @@ where
     let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range");
 
     // calculate the state root
-    let state_root = {
+    let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state);
+    let (state_root, trie_output) = {
         let state_provider = db.database.0.inner.borrow_mut();
-        state_provider
-            .db
-            .state_root(HashedPostState::from_bundle_state(&execution_outcome.state().state))?
+        state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| {
+            warn!(target: "payload_builder",
+                parent_hash=%parent_block.hash(),
+                %err,
+                "failed to calculate state root for payload"
+            );
+        })?
     };
 
     // create the block header
@@ -566,7 +432,16 @@ where
     let sealed_block = block.seal_slow();
     debug!(target: "payload_builder", ?sealed_block, "sealed built block");
 
-    let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees);
+    // create the executed block data
+    let executed = ExecutedBlock {
+        block: Arc::new(sealed_block.clone()),
+        senders: Arc::new(executed_senders),
+        execution_output: Arc::new(execution_outcome),
+        hashed_state: Arc::new(hashed_state),
+        trie: Arc::new(trie_output),
+    };
+
+    let mut payload =
+        EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed));
 
     // extend the payload with the blob sidecars from the executed txs
     payload.extend_sidecars(blob_sidecars);
diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs
index 57db6473df..3f978fabee 100644
--- a/crates/etl/src/lib.rs
+++ b/crates/etl/src/lib.rs
@@ -21,6 +21,9 @@ use std::{
     path::{Path, PathBuf},
 };
 
+/// Key and value lengths are encoded with [`usize::to_be_bytes()`], so each length field is 8
+/// bytes long.
+const KV_LEN: usize = 8; + use rayon::prelude::*; use reth_db_api::table::{Compress, Encode, Key, Value}; use tempfile::{NamedTempFile, TempDir}; @@ -256,8 +259,8 @@ impl EtlFile { return Ok(None) } - let mut buffer_key_length = [0; 8]; - let mut buffer_value_length = [0; 8]; + let mut buffer_key_length = [0; KV_LEN]; + let mut buffer_value_length = [0; KV_LEN]; self.file.read_exact(&mut buffer_key_length)?; self.file.read_exact(&mut buffer_value_length)?; diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index f7ad688d84..5fe402060d 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -15,8 +15,10 @@ workspace = true reth-chainspec.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true revm-primitives.workspace = true reth-prune-types.workspace = true +reth-metrics = { workspace = true, optional = true } reth-storage-errors.workspace = true reth-execution-types.workspace = true @@ -24,6 +26,7 @@ revm.workspace = true alloy-eips.workspace = true auto_impl.workspace = true futures-util.workspace = true +metrics = { workspace = true, optional = true } parking_lot = { workspace = true, optional = true } tokio = { workspace = true, features = ["sync", "time"] } @@ -32,5 +35,5 @@ parking_lot.workspace = true [features] default = ["std"] -std = [] +std = ["dep:metrics", "dep:reth-metrics"] test-utils = ["dep:parking_lot"] diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 1c3c8fa4d0..1113cc83d2 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -9,14 +9,12 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, string::String}; +use alloc::{boxed::Box, string::String}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; -use derive_more::Display; +use derive_more::{Display, From}; use reth_consensus::ConsensusError; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; @@ -139,7 +137,7 @@ impl std::error::Error for BlockValidationError { } /// `BlockExecutor` Errors -#[derive(Debug, Display)] +#[derive(Debug, From, Display)] pub enum BlockExecutionError { /// Validation error, transparently wrapping [`BlockValidationError`] Validation(BlockValidationError), @@ -181,24 +179,6 @@ impl BlockExecutionError { } } -impl From for BlockExecutionError { - fn from(error: BlockValidationError) -> Self { - Self::Validation(error) - } -} - -impl From for BlockExecutionError { - fn from(error: ConsensusError) -> Self { - Self::Consensus(error) - } -} - -impl From for BlockExecutionError { - fn from(error: InternalBlockExecutionError) -> Self { - Self::Internal(error) - } -} - impl From for BlockExecutionError { fn from(error: ProviderError) -> Self { InternalBlockExecutionError::from(error).into() @@ -217,9 +197,10 @@ impl std::error::Error for BlockExecutionError { } /// Internal (i.e., not validation or consensus related) `BlockExecutor` Errors -#[derive(Display, Debug)] +#[derive(Display, Debug, From)] pub enum InternalBlockExecutionError { /// Pruning error, transparently wrapping [`PruneSegmentError`] + #[from] Pruning(PruneSegmentError), /// Error when appending chain on fork is not possible #[display( @@ -232,6 +213,7 @@ pub enum InternalBlockExecutionError { other_chain_fork: Box, }, /// Error when fetching latest block 
state. + #[from] LatestBlock(ProviderError), /// Arbitrary Block Executor Errors #[cfg(feature = "std")] @@ -255,18 +237,6 @@ impl InternalBlockExecutionError { } } -impl From for InternalBlockExecutionError { - fn from(error: PruneSegmentError) -> Self { - Self::Pruning(error) - } -} - -impl From for InternalBlockExecutionError { - fn from(error: ProviderError) -> Self { - Self::LatestBlock(error) - } -} - #[cfg(feature = "std")] impl std::error::Error for InternalBlockExecutionError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index e90022526f..306cd6750a 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,15 +1,13 @@ //! Errors when computing the state root. +use alloc::string::ToString; use alloy_primitives::B256; -use derive_more::Display; +use derive_more::{Display, From}; use nybbles::Nibbles; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; -#[cfg(not(feature = "std"))] -use alloc::string::ToString; - /// State root errors. -#[derive(Display, Debug, PartialEq, Eq, Clone)] +#[derive(Display, Debug, From, PartialEq, Eq, Clone)] pub enum StateRootError { /// Internal database error. Database(DatabaseError), @@ -17,18 +15,6 @@ pub enum StateRootError { StorageRootError(StorageRootError), } -impl From for StateRootError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} - -impl From for StateRootError { - fn from(error: StorageRootError) -> Self { - Self::StorageRootError(error) - } -} - #[cfg(feature = "std")] impl std::error::Error for StateRootError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { @@ -49,18 +35,12 @@ impl From for DatabaseError { } /// Storage root error. -#[derive(Display, PartialEq, Eq, Clone, Debug)] +#[derive(Display, From, PartialEq, Eq, Clone, Debug)] pub enum StorageRootError { /// Internal database error. Database(DatabaseError), } -impl From for StorageRootError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} - impl From for DatabaseError { fn from(err: StorageRootError) -> Self { match err { @@ -79,7 +59,7 @@ impl std::error::Error for StorageRootError { } /// State proof errors. -#[derive(Display, Debug, PartialEq, Eq, Clone)] +#[derive(Display, From, Debug, PartialEq, Eq, Clone)] pub enum StateProofError { /// Internal database error. Database(DatabaseError), @@ -87,18 +67,6 @@ pub enum StateProofError { Rlp(alloy_rlp::Error), } -impl From for StateProofError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} - -impl From for StateProofError { - fn from(error: alloy_rlp::Error) -> Self { - Self::Rlp(error) - } -} - impl From for ProviderError { fn from(value: StateProofError) -> Self { match value { @@ -119,15 +87,14 @@ impl std::error::Error for StateProofError { } /// Trie witness errors. -#[derive(Display, Debug, PartialEq, Eq, Clone)] +#[derive(Display, From, Debug, PartialEq, Eq, Clone)] pub enum TrieWitnessError { /// Error gather proofs. + #[from] Proof(StateProofError), /// RLP decoding error. + #[from] Rlp(alloy_rlp::Error), - /// Missing storage multiproof. - #[display("missing storage multiproof for {_0}")] - MissingStorageMultiProof(B256), /// Missing account. 
#[display("missing account {_0}")] MissingAccount(B256), @@ -136,18 +103,6 @@ pub enum TrieWitnessError { MissingTargetNode(Nibbles), } -impl From for TrieWitnessError { - fn from(error: StateProofError) -> Self { - Self::Proof(error) - } -} - -impl From for TrieWitnessError { - fn from(error: alloy_rlp::Error) -> Self { - Self::Rlp(error) - } -} - impl From for ProviderError { fn from(error: TrieWitnessError) -> Self { Self::TrieWitnessError(error.to_string()) diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index f6122da519..994a66a52a 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -1,12 +1,8 @@ //! Contains [Chain], a chain of blocks and their final state. -#[cfg(not(feature = "std"))] +use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; use core::{fmt, ops::RangeInclusive}; -#[cfg(feature = "std")] -use std::{borrow::Cow, collections::BTreeMap}; - -use crate::ExecutionOutcome; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ Address, BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 0eba320cd7..7ea77b90f5 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -102,7 +102,7 @@ impl ExecutionOutcome { pub fn new_init( state_init: BundleStateInit, revert_init: RevertsInit, - contracts_init: Vec<(B256, Bytecode)>, + contracts_init: impl IntoIterator, receipts: Receipts, first_block: BlockNumber, requests: Vec, diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 86abd98de1..8965f04d7c 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -8,7 +8,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#[cfg(not(feature = "std"))] extern crate alloc; mod chain; diff --git a/crates/evm/src/builder.rs b/crates/evm/src/builder.rs index e238ba8cca..4b7511494a 100644 --- a/crates/evm/src/builder.rs +++ b/crates/evm/src/builder.rs @@ -1,8 +1,6 @@ //! Builder for creating an EVM with a database and environment. 
-#[cfg(not(feature = "std"))] use alloc::boxed::Box; - use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::EnvWithHandlerCfg; diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 7b52a8accf..569491a1b0 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -9,14 +9,13 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; use core::ops::Deref; use crate::builder::RethEvmBuilder; -use reth_chainspec::ChainSpec; -use reth_primitives::{Address, Header, TransactionSigned, TransactionSignedEcRecovered, U256}; +use reth_primitives::{Address, TransactionSigned, TransactionSignedEcRecovered, B256, U256}; +use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{ BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, @@ -25,6 +24,8 @@ use revm_primitives::{ pub mod builder; pub mod either; pub mod execute; +#[cfg(feature = "std")] +pub mod metrics; pub mod noop; pub mod provider; pub mod system_calls; @@ -108,6 +109,9 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Default trait method implementation is done w.r.t. L1. #[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { + /// The header type used by the EVM. + type Header: BlockHeader; + /// Returns a [`TxEnv`] from a [`TransactionSignedEcRecovered`]. fn tx_env(&self, transaction: &TransactionSignedEcRecovered) -> TxEnv { let mut tx_env = TxEnv::default(); @@ -127,48 +131,76 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); - /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. 
+    ///
+    /// This must set the corresponding spec id in the handler cfg, based on timestamp or total
+    /// difficulty.
     fn fill_cfg_env(
         &self,
         cfg_env: &mut CfgEnvWithHandlerCfg,
-        chain_spec: &ChainSpec,
-        header: &Header,
+        header: &Self::Header,
         total_difficulty: U256,
     );
 
     /// Fill [`BlockEnv`] field according to the chain spec and given header
-    fn fill_block_env(&self, block_env: &mut BlockEnv, header: &Header, after_merge: bool) {
-        block_env.number = U256::from(header.number);
-        block_env.coinbase = header.beneficiary;
-        block_env.timestamp = U256::from(header.timestamp);
+    fn fill_block_env(&self, block_env: &mut BlockEnv, header: &Self::Header, after_merge: bool) {
+        block_env.number = U256::from(header.number());
+        block_env.coinbase = header.beneficiary();
+        block_env.timestamp = U256::from(header.timestamp());
         if after_merge {
-            block_env.prevrandao = Some(header.mix_hash);
+            block_env.prevrandao = Some(header.mix_hash());
             block_env.difficulty = U256::ZERO;
         } else {
-            block_env.difficulty = header.difficulty;
+            block_env.difficulty = header.difficulty();
             block_env.prevrandao = None;
         }
-        block_env.basefee = U256::from(header.base_fee_per_gas.unwrap_or_default());
-        block_env.gas_limit = U256::from(header.gas_limit);
+        block_env.basefee = U256::from(header.base_fee_per_gas().unwrap_or_default());
+        block_env.gas_limit = U256::from(header.gas_limit());
 
         // EIP-4844 excess blob gas of this block, introduced in Cancun
-        if let Some(excess_blob_gas) = header.excess_blob_gas {
+        if let Some(excess_blob_gas) = header.excess_blob_gas() {
             block_env.set_blob_excess_gas_and_price(excess_blob_gas);
         }
     }
 
     /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and
     /// [`ConfigureEvmEnv::fill_block_env`].
+    ///
+    /// Note: Implementers should ensure that all required fields are filled.
     fn fill_cfg_and_block_env(
         &self,
         cfg: &mut CfgEnvWithHandlerCfg,
         block_env: &mut BlockEnv,
-        chain_spec: &ChainSpec,
-        header: &Header,
+        header: &Self::Header,
         total_difficulty: U256,
     ) {
-        self.fill_cfg_env(cfg, chain_spec, header, total_difficulty);
+        self.fill_cfg_env(cfg, header, total_difficulty);
         let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE;
         self.fill_block_env(block_env, header, after_merge);
     }
+
+    /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for `parent + 1` block.
+    ///
+    /// This is intended for usage in block building after the merge and requires additional
+    /// attributes that can't be derived from the parent block: attributes that are determined by
+    /// the CL, such as the timestamp, suggested fee recipient, and randomness value.
+    fn next_cfg_and_block_env(
+        &self,
+        parent: &Self::Header,
+        attributes: NextBlockEnvAttributes,
+    ) -> (CfgEnvWithHandlerCfg, BlockEnv);
+}
+
+/// Represents additional attributes required to configure the next block.
+/// This is used to configure the next block's environment in
+/// [`ConfigureEvmEnv::next_cfg_and_block_env`] and contains fields that can't be derived from the
+/// parent header alone (attributes that are determined by the CL).
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct NextBlockEnvAttributes {
+    /// The timestamp of the next block.
+    pub timestamp: u64,
+    /// The suggested fee recipient for the next block.
+    pub suggested_fee_recipient: Address,
+    /// The randomness value for the next block.
+    pub prev_randao: B256,
+}
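To make the split above concrete: `fill_cfg_and_block_env` derives the environment from an existing header (execution of a known block), while `next_cfg_and_block_env` derives it for `parent + 1` from the parent header plus CL attributes (block building). A toy model of the distinction, with heavily simplified types that are not the reth API:

```rust
struct Header { number: u64, timestamp: u64 }
struct Attributes { timestamp: u64 }
struct BlockEnv { number: u64, timestamp: u64 }

// Executing an existing block: everything comes from its own header.
fn env_for_existing(header: &Header) -> BlockEnv {
    BlockEnv { number: header.number, timestamp: header.timestamp }
}

// Building the next block: the number is derived from the parent, while the
// timestamp is chosen by the consensus layer.
fn env_for_next(parent: &Header, attrs: &Attributes) -> BlockEnv {
    BlockEnv { number: parent.number + 1, timestamp: attrs.timestamp }
}

fn main() {
    let parent = Header { number: 100, timestamp: 1_700_000_000 };
    let next = env_for_next(&parent, &Attributes { timestamp: 1_700_000_012 });
    assert_eq!(next.number, 101);
    assert_eq!(env_for_existing(&parent).number, 100);
}
```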
diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs
new file mode 100644
index 0000000000..ea582f9f45
--- /dev/null
+++ b/crates/evm/src/metrics.rs
@@ -0,0 +1,48 @@
+//! Executor metrics.
+//!
+//! Block processing related to syncing should take care to update the metrics by using e.g.
+//! [`ExecutorMetrics::metered`].
+use std::time::Instant;
+
+use metrics::{Counter, Gauge, Histogram};
+use reth_execution_types::BlockExecutionInput;
+use reth_metrics::Metrics;
+use reth_primitives::{BlockWithSenders, Header};
+
+/// Executor metrics.
+// TODO(onbjerg): add sload/sstore, acc load/acc change, bytecode metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "sync.execution")]
+pub struct ExecutorMetrics {
+    /// The total amount of gas processed.
+    pub gas_processed_total: Counter,
+    /// The instantaneous amount of gas processed per second.
+    pub gas_per_second: Gauge,
+    /// The histogram of the time taken to execute blocks.
+    pub execution_histogram: Histogram,
+    /// The total amount of time it took to execute the latest block.
+    pub execution_duration: Gauge,
+}
+
+impl ExecutorMetrics {
+    /// Execute the given block and update metrics for the execution.
+    pub fn metered<F, R>(&self, input: BlockExecutionInput<'_, BlockWithSenders, Header>, f: F) -> R
+    where
+        F: FnOnce(BlockExecutionInput<'_, BlockWithSenders, Header>) -> R,
+    {
+        let gas_used = input.block.gas_used;
+
+        // Execute the block and record the elapsed time.
+        let execute_start = Instant::now();
+        let output = f(input);
+        let execution_duration = execute_start.elapsed().as_secs_f64();
+
+        // Update gas metrics.
+        self.gas_processed_total.increment(gas_used);
+        self.gas_per_second.set(gas_used as f64 / execution_duration);
+        self.execution_histogram.record(execution_duration);
+        self.execution_duration.set(execution_duration);
+
+        output
+    }
+}
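A reduced version of the `metered` wrapper above, with plain values in place of the `metrics` crate recorders; this sketches only the timing and throughput logic, under the assumption that block gas usage is known before execution:

```rust
use std::time::Instant;

fn metered<F, R>(gas_used: u64, f: F) -> (R, f64)
where
    F: FnOnce() -> R,
{
    // Run the (stand-in) block execution and time it.
    let start = Instant::now();
    let out = f();
    let elapsed = start.elapsed().as_secs_f64();

    // Instantaneous throughput, as the `gas_per_second` gauge would record it.
    (out, gas_used as f64 / elapsed)
}

fn main() {
    let (_result, gas_per_sec) = metered(21_000, || {
        // stand-in for executing a block
        std::thread::sleep(std::time::Duration::from_millis(10));
    });
    println!("throughput: {gas_per_sec:.0} gas/s");
}
```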
diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs
index b847a0665a..fc3d4ff94f 100644
--- a/crates/evm/src/provider.rs
+++ b/crates/evm/src/provider.rs
@@ -22,7 +22,7 @@ pub trait EvmEnvProvider: Send + Sync {
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv;
+        EvmConfig: ConfigureEvmEnv<Header = Header>;
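The methods of this trait follow a fill-into-`&mut`-then-return pattern: default containers are allocated, a fallible fill method populates them, and the filled values are returned. A self-contained skeleton of that pattern with a hypothetical trait and types, not reth's:

```rust
#[derive(Default)]
struct CfgEnv { chain_id: u64 }
#[derive(Default)]
struct BlockEnv { number: u64 }

trait EnvProvider {
    // The fallible "fill" primitive that implementors supply.
    fn fill_env(&self, cfg: &mut CfgEnv, block: &mut BlockEnv) -> Result<(), String>;

    // The convenience default method built on top of it.
    fn env(&self) -> Result<(CfgEnv, BlockEnv), String> {
        let mut cfg = CfgEnv::default();
        let mut block = BlockEnv::default();
        self.fill_env(&mut cfg, &mut block)?;
        Ok((cfg, block))
    }
}

struct Mainnet;

impl EnvProvider for Mainnet {
    fn fill_env(&self, cfg: &mut CfgEnv, block: &mut BlockEnv) -> Result<(), String> {
        cfg.chain_id = 1;
        block.number = 1;
        Ok(())
    }
}

fn main() {
    let (cfg, _block) = Mainnet.env().unwrap();
    assert_eq!(cfg.chain_id, 1);
}
```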
 
     /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the
     /// given [Header].
     fn env_with_header<EvmConfig>(
         &self,
         header: &Header,
         evm_config: EvmConfig,
     ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)>
     where
-        EvmConfig: ConfigureEvmEnv,
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
     {
         let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST);
         let mut block_env = BlockEnv::default();
@@ -50,7 +50,7 @@ pub trait EvmEnvProvider: Send + Sync {
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv;
+        EvmConfig: ConfigureEvmEnv<Header = Header>
; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. @@ -61,7 +61,7 @@ pub trait EvmEnvProvider: Send + Sync { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv; + EvmConfig: ConfigureEvmEnv
; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. fn fill_cfg_env_with_header( @@ -71,5 +71,5 @@ pub trait EvmEnvProvider: Send + Sync { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv; + EvmConfig: ConfigureEvmEnv
; } diff --git a/crates/evm/src/system_calls.rs b/crates/evm/src/system_calls.rs deleted file mode 100644 index 779e6aad75..0000000000 --- a/crates/evm/src/system_calls.rs +++ /dev/null @@ -1,513 +0,0 @@ -//! System contract call functions. - -#[cfg(feature = "std")] -use std::fmt::Display; -#[cfg(not(feature = "std"))] -use { - alloc::{boxed::Box, format, string::ToString, vec::Vec}, - core::fmt::Display, -}; - -use crate::ConfigureEvm; -use alloy_eips::{ - eip2935::HISTORY_STORAGE_ADDRESS, - eip4788::BEACON_ROOTS_ADDRESS, - eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}, - eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}, -}; -use reth_chainspec::{BscHardforks, ChainSpec, EthereumHardforks}; -use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Buf, Request}; -use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; -use revm_primitives::{ - Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, - ResultAndState, B256, -}; - -/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block contract call. -/// -/// This constructs a new [`Evm`] with the given database and environment ([`CfgEnvWithHandlerCfg`] -/// and [`BlockEnv`]) to execute the pre block contract call. -/// -/// This uses [`apply_blockhashes_contract_call`] to ultimately apply the blockhash contract state -/// change. -pub fn pre_block_blockhashes_contract_call( - db: &mut DB, - evm_config: &EvmConfig, - chain_spec: &ChainSpec, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_block_hash: B256, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm, -{ - // Apply the pre-block EIP-2935 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - apply_blockhashes_contract_call( - evm_config, - chain_spec, - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), - parent_block_hash, - &mut evm_pre_block, - ) -} - -/// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, -/// [`ChainSpec`], and EVM. -/// -/// If Prague is not activated, or the block is the genesis block, then this is a no-op, and no -/// state changes are made. 
-/// -/// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 -#[inline] -pub fn apply_blockhashes_contract_call( - evm_config: &EvmConfig, - chain_spec: &ChainSpec, - block_timestamp: u64, - block_number: u64, - parent_block_hash: B256, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm, -{ - if !chain_spec.is_prague_active_at_timestamp(block_timestamp) { - return Ok(()) - } - - // if the block number is zero (genesis block) then no system transaction may occur as per - // EIP-2935 - if block_number == 0 { - return Ok(()) - } - - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - evm_config.fill_tx_env_system_contract_call( - &mut evm.context.evm.env, - alloy_eips::eip4788::SYSTEM_ADDRESS, - HISTORY_STORAGE_ADDRESS, - parent_block_hash.0.into(), - ); - - let mut state = match evm.transact() { - Ok(res) => res.state, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::BlockHashContractCall { message: e.to_string() }.into()) - } - }; - - state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - Ok(()) -} - -/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [`Evm`] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. -pub fn pre_block_beacon_root_contract_call( - db: &mut DB, - evm_config: &EvmConfig, - chain_spec: &ChainSpec, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_beacon_block_root: Option, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm, -{ - // Return immediately if beaconRoot equals the zero hash when using the Parlia engine. - if chain_spec.is_bohr_active_at_timestamp(initialized_block_env.timestamp.to()) && - parent_beacon_block_root == Some(B256::ZERO) - { - return Ok(()) - } - - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - evm_config, - chain_spec, - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), - parent_beacon_block_root, - &mut evm_pre_block, - ) -} - -/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, -/// [`ChainSpec`], EVM. -/// -/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no -/// state changes are made. 
-/// -/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 -#[inline] -pub fn apply_beacon_root_contract_call( - evm_config: &EvmConfig, - chain_spec: &ChainSpec, - block_timestamp: u64, - block_number: u64, - parent_beacon_block_root: Option, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm, -{ - if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { - return Ok(()) - } - - let parent_beacon_block_root = - parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; - - // if the block number is zero (genesis block) then the parent beacon block root must - // be 0x0 and no system transaction may occur as per EIP-4788 - if block_number == 0 { - if !parent_beacon_block_root.is_zero() { - return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { - parent_beacon_block_root, - } - .into()) - } - return Ok(()) - } - - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - evm_config.fill_tx_env_system_contract_call( - &mut evm.context.evm.env, - alloy_eips::eip4788::SYSTEM_ADDRESS, - BEACON_ROOTS_ADDRESS, - parent_beacon_block_root.0.into(), - ); - - let mut state = match evm.transact() { - Ok(res) => res.state, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::BeaconRootContractCall { - parent_beacon_block_root: Box::new(parent_beacon_block_root), - message: e.to_string(), - } - .into()) - } - }; - - state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - Ok(()) -} - -/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the -/// [requests](Request). -pub fn post_block_withdrawal_requests_contract_call( - evm_config: &EvmConfig, - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm, -{ - // apply post-block EIP-7002 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call(evm_config, &mut evm_post_block) -} - -/// Applies the post-block call to the EIP-7002 withdrawal requests contract. -/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the withdrawal requests are returned. 
-#[inline] -pub fn apply_withdrawal_requests_contract_call( - evm_config: &EvmConfig, - evm: &mut Evm<'_, EXT, DB>, -) -> Result, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm, -{ - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // Fill transaction environment with the EIP-7002 withdrawal requests contract message data. - // - // This requirement for the withdrawal requests contract call defined by - // [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) is: - // - // At the end of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. - // after processing all transactions and after performing the block body withdrawal requests - // validations), call the contract as `SYSTEM_ADDRESS`. - evm_config.fill_tx_env_system_contract_call( - &mut evm.context.evm.env, - alloy_eips::eip7002::SYSTEM_ADDRESS, - WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, - Bytes::new(), - ); - - let ResultAndState { result, mut state } = match evm.transact() { - Ok(res) => res, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution failed: {e}"), - } - .into()) - } - }; - - // cleanup the state - state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - let mut data = match result { - ExecutionResult::Success { output, .. } => Ok(output.into_data()), - ExecutionResult::Revert { output, .. } => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution reverted: {output}"), - }) - } - ExecutionResult::Halt { reason, .. } => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), - } - .into()) - } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_pubkey.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests - .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); - } - - Ok(withdrawal_requests) -} - -/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the -/// [requests](Request). 
-pub fn post_block_consolidation_requests_contract_call( - evm_config: &EvmConfig, - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: Display, - EvmConfig: ConfigureEvm, -{ - // apply post-block EIP-7251 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_consolidation_requests_contract_call(evm_config, &mut evm_post_block) -} - -/// Applies the post-block call to the EIP-7251 consolidation requests contract. -/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the consolidation requests are returned. -#[inline] -pub fn apply_consolidation_requests_contract_call( - evm_config: &EvmConfig, - evm: &mut Evm<'_, EXT, DB>, -) -> Result, BlockExecutionError> -where - DB: Database + DatabaseCommit, - DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm, -{ - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // Fill transaction environment with the EIP-7251 consolidation requests contract message data. - // - // This requirement for the consolidation requests contract call defined by - // [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) is: - // - // At the end of processing any execution block where block.timestamp >= FORK_TIMESTAMP (i.e. - // after processing all transactions and after performing the block body requests validations) - // clienst software MUST [..] call the contract as `SYSTEM_ADDRESS` and empty input data to - // trigger the system subroutine execute. - evm_config.fill_tx_env_system_contract_call( - &mut evm.context.evm.env, - alloy_eips::eip7002::SYSTEM_ADDRESS, - CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS, - Bytes::new(), - ); - - let ResultAndState { result, mut state } = match evm.transact() { - Ok(res) => res, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::ConsolidationRequestsContractCall { - message: format!("execution failed: {e}"), - } - .into()) - } - }; - - // cleanup the state - state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - let mut data = match result { - ExecutionResult::Success { output, .. } => Ok(output.into_data()), - ExecutionResult::Revert { output, .. } => { - Err(BlockValidationError::ConsolidationRequestsContractCall { - message: format!("execution reverted: {output}"), - }) - } - ExecutionResult::Halt { reason, .. 
} => { - Err(BlockValidationError::ConsolidationRequestsContractCall { - message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Consolidations are encoded as a series of consolidation requests, each with the following - // format: - // - // +------+--------+---------------+ - // | addr | pubkey | target pubkey | - // +------+--------+---------------+ - // 20 48 48 - - const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; - let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < CONSOLIDATION_REQUEST_SIZE { - return Err(BlockValidationError::ConsolidationRequestsContractCall { - message: "invalid consolidation request length".to_string(), - } - .into()) - } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut source_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(source_pubkey.as_mut_slice()); - - let mut target_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(target_pubkey.as_mut_slice()); - - consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { - source_address, - source_pubkey, - target_pubkey, - })); - } - - Ok(consolidation_requests) -} diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs new file mode 100644 index 0000000000..d32a589666 --- /dev/null +++ b/crates/evm/src/system_calls/eip2935.rs @@ -0,0 +1,151 @@ +//! [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) system call implementation. + +use alloc::{boxed::Box, string::ToString}; +use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; + +use crate::ConfigureEvm; +use core::fmt::Display; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::Header; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; + +/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block contract call. +/// +/// This constructs a new [`Evm`] with the given database and environment ([`CfgEnvWithHandlerCfg`] +/// and [`BlockEnv`]) to execute the pre block contract call. +/// +/// This uses [`apply_blockhashes_contract_call`] to ultimately apply the +/// blockhash contract state change. +pub fn pre_block_blockhashes_contract_call( + db: &mut DB, + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + parent_block_hash: B256, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: Display, + EvmConfig: ConfigureEvm
, +{ + // Apply the pre-block EIP-2935 contract call + let mut evm_pre_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + apply_blockhashes_contract_call( + evm_config, + chain_spec, + initialized_block_env.timestamp.to(), + initialized_block_env.number.to(), + parent_block_hash, + &mut evm_pre_block, + ) +} + +/// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, +/// [`ChainSpec`], and EVM. +/// +/// If Prague is not activated, or the block is the genesis block, then this is a no-op, and no +/// state changes are made. +/// +/// Note: this does not commit the state changes to the database, it only transacts the call. +/// +/// Returns `None` if Prague is not active or the block is the genesis block, otherwise returns the +/// result of the call. +/// +/// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 +#[inline] +pub fn transact_blockhashes_contract_call<EvmConfig, EXT, DB>( + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + block_timestamp: u64, + block_number: u64, + parent_block_hash: B256, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<Option<ResultAndState>, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + if !chain_spec.is_prague_active_at_timestamp(block_timestamp) { + return Ok(None) + } + + // if the block number is zero (genesis block) then no system transaction may occur as per + // EIP-2935 + if block_number == 0 { + return Ok(None) + } + + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip4788::SYSTEM_ADDRESS, + HISTORY_STORAGE_ADDRESS, + parent_block_hash.0.into(), + ); + + let mut res = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::BlockHashContractCall { message: e.to_string() }.into()) + } + }; + + res.state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); + res.state.remove(&evm.block().coinbase); + + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(Some(res)) +} + +/// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, +/// [`ChainSpec`], and EVM, and commits the relevant state changes. +/// +/// If Prague is not activated, or the block is the genesis block, then this is a no-op, and no +/// state changes are made. +/// +/// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 +#[inline] +pub fn apply_blockhashes_contract_call<EvmConfig, EXT, DB>( + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + block_timestamp: u64, + block_number: u64, + parent_block_hash: B256, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + if let Some(res) = transact_blockhashes_contract_call( + evm_config, + chain_spec, + block_timestamp, + block_number, + parent_block_hash, + evm, + )? { + evm.context.evm.db.commit(res.state); + } + + Ok(()) +} diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs new file mode 100644 index 0000000000..7e67072cb1 --- /dev/null +++ b/crates/evm/src/system_calls/eip4788.rs @@ -0,0 +1,132 @@ +//! [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) system call implementation. +use alloc::{boxed::Box, string::ToString}; + +use crate::ConfigureEvm; +use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; +use reth_chainspec::{BscHardforks, ChainSpec, EthereumHardforks}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::Header; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, B256}; + +/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. +/// +/// This constructs a new [`Evm`] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. +/// +/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state +/// change. +pub fn pre_block_beacon_root_contract_call<EvmConfig, DB>( + db: &mut DB, + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + parent_beacon_block_root: Option<B256>, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + // Return immediately if beaconRoot equals the zero hash when using the Parlia engine. + if chain_spec.is_bohr_active_at_timestamp(initialized_block_env.timestamp.to()) && + parent_beacon_block_root == Some(B256::ZERO) + { + return Ok(()) + } + + // apply pre-block EIP-4788 contract call + let mut evm_pre_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the pre block call needs the block itself + apply_beacon_root_contract_call( + evm_config, + chain_spec, + initialized_block_env.timestamp.to(), + initialized_block_env.number.to(), + parent_beacon_block_root, + &mut evm_pre_block, + ) +} + +/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, +/// [`ChainSpec`], and EVM. +/// +/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no +/// state changes are made. +/// +/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 +#[inline] +pub fn apply_beacon_root_contract_call<EvmConfig, EXT, DB>( + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + block_timestamp: u64, + block_number: u64, + parent_beacon_block_root: Option<B256>, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { + return Ok(()) + } + + let parent_beacon_block_root = + parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; + + // if the block number is zero (genesis block) then the parent beacon block root must + // be 0x0 and no system transaction may occur as per EIP-4788 + if block_number == 0 { + if !parent_beacon_block_root.is_zero() { + return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { + parent_beacon_block_root, + } + .into()) + } + return Ok(()) + } + + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip4788::SYSTEM_ADDRESS, + BEACON_ROOTS_ADDRESS, + parent_beacon_block_root.0.into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::BeaconRootContractCall { + parent_beacon_block_root: Box::new(parent_beacon_block_root), + message: e.to_string(), + } + .into()) + } + }; + + state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs new file mode 100644 index 0000000000..5d73da7b26 --- /dev/null +++ b/crates/evm/src/system_calls/eip7002.rs @@ -0,0 +1,143 @@ +//! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; +use core::fmt::Display; + +use crate::ConfigureEvm; +use alloy_eips::eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Buf, Header, Request}; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{ + Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, + ResultAndState, +}; + +/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. + +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. + +/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the +/// [requests](Request). +pub fn post_block_withdrawal_requests_contract_call<EvmConfig, DB>( + evm_config: &EvmConfig, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, +) -> Result<Vec<Request>, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + // apply post-block EIP-7002 contract call + let mut evm_post_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the post block call needs the block itself + apply_withdrawal_requests_contract_call(evm_config, &mut evm_post_block) +} + +/// Applies the post-block call to the EIP-7002 withdrawal requests contract. +/// +/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is +/// returned. Otherwise, the withdrawal requests are returned. +#[inline] +pub fn apply_withdrawal_requests_contract_call<EvmConfig, EXT, DB>( + evm_config: &EvmConfig, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<Vec<Request>, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // Fill transaction environment with the EIP-7002 withdrawal requests contract message data. + // + // This requirement for the withdrawal requests contract call defined by + // [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) is: + // + // At the end of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. + // after processing all transactions and after performing the block body withdrawal requests + // validations), call the contract as `SYSTEM_ADDRESS`. + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip7002::SYSTEM_ADDRESS, + WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, + Bytes::new(), + ); + + let ResultAndState { result, mut state } = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution failed: {e}"), + } + .into()) + } + }; + + // cleanup the state + state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + let mut data = match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. } => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution reverted: {output}"), + }) + } + ExecutionResult::Halt { reason, .. } => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution halted: {reason:?}"), + }) + } + }?; + + // Withdrawals are encoded as a series of withdrawal requests, each with the following + // format: + // + // +------+--------+--------+ + // | addr | pubkey | amount | + // +------+--------+--------+ + // 20 48 8 + + const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; + let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); + while data.has_remaining() { + if data.remaining() < WITHDRAWAL_REQUEST_SIZE { + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: "invalid withdrawal request length".to_string(), + } + .into()) + } + + let mut source_address = Address::ZERO; + data.copy_to_slice(source_address.as_mut_slice()); + + let mut validator_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(validator_pubkey.as_mut_slice()); + + let amount = data.get_u64(); + + withdrawal_requests + .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); + } + + Ok(withdrawal_requests) +} diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs new file mode 100644 index 0000000000..02684d83b7 --- /dev/null +++ b/crates/evm/src/system_calls/eip7251.rs @@ -0,0 +1,148 @@ +//! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. 
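As an aside on the EIP-7002 wire format parsed above: each request is fixed-width, a 20-byte source address, a 48-byte validator pubkey, and an 8-byte amount that `Buf::get_u64` reads big-endian, 76 bytes in total. A minimal std-only decoding sketch; the helper name and test vectors are illustrative, not part of this patch:

fn split_withdrawal_request(data: &[u8]) -> Option<([u8; 20], [u8; 48], u64)> {
    // addr (20) + pubkey (48) + amount (8) = 76 bytes per request
    const SIZE: usize = 20 + 48 + 8;
    let chunk = data.get(..SIZE)?;
    let mut addr = [0u8; 20];
    addr.copy_from_slice(&chunk[..20]);
    let mut pubkey = [0u8; 48];
    pubkey.copy_from_slice(&chunk[20..68]);
    // `Buf::get_u64` is big-endian, so mirror that here.
    let amount = u64::from_be_bytes(chunk[68..76].try_into().ok()?);
    Some((addr, pubkey, amount))
}

fn main() {
    // A zeroed 76-byte request decodes; a truncated buffer is rejected, mirroring the
    // "invalid withdrawal request length" error path above.
    assert!(split_withdrawal_request(&[0u8; 76]).is_some());
    assert!(split_withdrawal_request(&[0u8; 75]).is_none());
}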
+use alloc::{boxed::Box, format, string::ToString, vec::Vec}; +use core::fmt::Display; + +use crate::ConfigureEvm; +use alloy_eips::eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Buf, Header, Request}; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{ + Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, + ResultAndState, +}; + +/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call. + +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. + +/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the +/// [requests](Request). +pub fn post_block_consolidation_requests_contract_call<EvmConfig, DB>( + evm_config: &EvmConfig, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, +) -> Result<Vec<Request>, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + // apply post-block EIP-7251 contract call + let mut evm_post_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the post block call needs the block itself + apply_consolidation_requests_contract_call(evm_config, &mut evm_post_block) +} + +/// Applies the post-block call to the EIP-7251 consolidation requests contract. +/// +/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is +/// returned. Otherwise, the consolidation requests are returned. +#[inline] +pub fn apply_consolidation_requests_contract_call<EvmConfig, EXT, DB>( + evm_config: &EvmConfig, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<Vec<Request>, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm<Header = Header>
, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // Fill transaction environment with the EIP-7251 consolidation requests contract message data. + // + // This requirement for the consolidation requests contract call defined by + // [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) is: + // + // At the end of processing any execution block where block.timestamp >= FORK_TIMESTAMP (i.e. + // after processing all transactions and after performing the block body requests validations) + // client software MUST [..] call the contract as `SYSTEM_ADDRESS` with empty input data to + // trigger the system subroutine execution. + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip7002::SYSTEM_ADDRESS, + CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS, + Bytes::new(), + ); + + let ResultAndState { result, mut state } = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution failed: {e}"), + } + .into()) + } + }; + + // cleanup the state + state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + let mut data = match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. } => { + Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution reverted: {output}"), + }) + } + ExecutionResult::Halt { reason, .. } => { + Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution halted: {reason:?}"), + }) + } + }?; + + // Consolidations are encoded as a series of consolidation requests, each with the following + // format: + // + // +------+--------+---------------+ + // | addr | pubkey | target pubkey | + // +------+--------+---------------+ + // 20 48 48 + + const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; + let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); + while data.has_remaining() { + if data.remaining() < CONSOLIDATION_REQUEST_SIZE { + return Err(BlockValidationError::ConsolidationRequestsContractCall { + message: "invalid consolidation request length".to_string(), + } + .into()) + } + + let mut source_address = Address::ZERO; + data.copy_to_slice(source_address.as_mut_slice()); + + let mut source_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(source_pubkey.as_mut_slice()); + + let mut target_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(target_pubkey.as_mut_slice()); + + consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { + source_address, + source_pubkey, + target_pubkey, + })); + } + + Ok(consolidation_requests) +} diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs new file mode 100644 index 0000000000..50d5c4c857 --- /dev/null +++ b/crates/evm/src/system_calls/mod.rs @@ -0,0 +1,13 @@ +//! System contract call functions.
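With the module split in place, a block executor drives all four system calls through these entry points. A hedged wiring sketch using the signatures introduced above; the `block_system_calls` helper and its argument plumbing are assumptions for illustration, only the four call sites come from this patch:

use reth_chainspec::ChainSpec;
use reth_evm::{
    system_calls::{
        post_block_consolidation_requests_contract_call,
        post_block_withdrawal_requests_contract_call, pre_block_beacon_root_contract_call,
        pre_block_blockhashes_contract_call,
    },
    ConfigureEvm,
};
use reth_execution_errors::BlockExecutionError;
use reth_primitives::{Header, Request};
use revm::{Database, DatabaseCommit};
use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, B256};

fn block_system_calls<EvmConfig, DB>(
    db: &mut DB,
    evm_config: &EvmConfig,
    chain_spec: &ChainSpec,
    cfg: &CfgEnvWithHandlerCfg,
    block_env: &BlockEnv,
    parent_hash: B256,
    parent_beacon_block_root: Option<B256>,
) -> Result<Vec<Request>, BlockExecutionError>
where
    DB: Database + DatabaseCommit,
    DB::Error: core::fmt::Display,
    EvmConfig: ConfigureEvm<Header = Header>,
{
    // Pre-block calls; each is a no-op before its activation fork.
    pre_block_blockhashes_contract_call(db, evm_config, chain_spec, cfg, block_env, parent_hash)?;
    pre_block_beacon_root_contract_call(
        db, evm_config, chain_spec, cfg, block_env, parent_beacon_block_root,
    )?;

    // ... transaction execution would happen here ...

    // Post-block calls; both are documented to return empty vectors before Prague.
    let mut requests = post_block_withdrawal_requests_contract_call(evm_config, db, cfg, block_env)?;
    requests.extend(post_block_consolidation_requests_contract_call(evm_config, db, cfg, block_env)?);
    Ok(requests)
}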
+ +mod eip2935; +pub use eip2935::*; + +mod eip4788; +pub use eip4788::*; + +mod eip7002; +pub use eip7002::*; + +mod eip7251; +pub use eip7251::*; diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 1ad906e6f0..4e082c4573 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] ## reth +reth-chainspec.workspace = true reth-config.workspace = true reth-evm.workspace = true reth-exex-types.workspace = true @@ -20,8 +21,8 @@ reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-payload-builder.workspace = true -reth-primitives-traits.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune-types.workspace = true reth-revm.workspace = true @@ -40,14 +41,13 @@ metrics.workspace = true [dev-dependencies] reth-blockchain-tree.workspace = true -reth-chainspec.workspace = true reth-db-api.workspace = true reth-db-common.workspace = true reth-evm-ethereum.workspace = true reth-node-api.workspace = true +reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true -reth-primitives-traits = { workspace = true, features = ["test-utils"] } secp256k1.workspace = true diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index f5b2ccb1a5..206e26e2df 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -256,7 +256,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(provider_factory.clone())?; + init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new( provider_factory.clone(), Arc::new(NoopBlockchainTree::default()), @@ -295,7 +295,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(provider_factory.clone())?; + init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new( provider_factory.clone(), Arc::new(NoopBlockchainTree::default()), diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 5b72dad38b..95744d2eb1 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -114,9 +114,6 @@ where let start = range.next(); let range_bounds = start.zip(range.last().or(start)); - // Advance the range by `batch_size` blocks - this.range.nth(this.batch_size); - // If we have range bounds, then we can spawn a new task for that range if let Some((first, last)) = range_bounds { let range = first..=last; @@ -202,7 +199,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(provider_factory.clone())?; + init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new( provider_factory.clone(), Arc::new(NoopBlockchainTree::default()), @@ -243,7 +240,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - 
init_genesis(provider_factory.clone())?; + init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new( provider_factory.clone(), Arc::new(NoopBlockchainTree::default()), diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 9ee68831ca..39c23ab1af 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use eyre::OptionExt; -use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET}; +use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS}; use reth_evm::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; @@ -10,7 +10,10 @@ use reth_primitives::{ b256, constants::ETH_TO_WEI, Address, Block, BlockWithSenders, Genesis, GenesisAccount, Header, Receipt, Requests, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, }; -use reth_provider::{BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, ProviderFactory}; +use reth_provider::{ + providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, + ProviderFactory, +}; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; use secp256k1::Keypair; @@ -47,13 +50,13 @@ pub(crate) fn chain_spec(address: Address) -> Arc { ) } -pub(crate) fn execute_block_and_commit_to_database( - provider_factory: &ProviderFactory, +pub(crate) fn execute_block_and_commit_to_database( + provider_factory: &ProviderFactory, chain_spec: Arc, block: &BlockWithSenders, ) -> eyre::Result> where - DB: reth_db_api::database::Database, + N: ProviderNodeTypes, { let provider = provider_factory.provider()?; @@ -103,8 +106,8 @@ fn blocks( ), difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), number: 1, - gas_limit: 21000, - gas_used: 21000, + gas_limit: MIN_TRANSACTION_GAS, + gas_used: MIN_TRANSACTION_GAS, ..Default::default() }, body: vec![sign_tx_with_key_pair( @@ -112,7 +115,7 @@ fn blocks( Transaction::Eip2930(TxEip2930 { chain_id: chain_spec.chain.id(), nonce: 0, - gas_limit: 21000, + gas_limit: MIN_TRANSACTION_GAS as u128, gas_price: 1_500_000_000, to: TxKind::Call(Address::ZERO), value: U256::from(0.1 * ETH_TO_WEI as f64), @@ -134,8 +137,8 @@ fn blocks( ), difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), number: 2, - gas_limit: 21000, - gas_used: 21000, + gas_limit: MIN_TRANSACTION_GAS, + gas_used: MIN_TRANSACTION_GAS, ..Default::default() }, body: vec![sign_tx_with_key_pair( @@ -143,7 +146,7 @@ fn blocks( Transaction::Eip2930(TxEip2930 { chain_id: chain_spec.chain.id(), nonce: 1, - gas_limit: 21000, + gas_limit: MIN_TRANSACTION_GAS as u128, gas_price: 1_500_000_000, to: TxKind::Call(Address::ZERO), value: U256::from(0.1 * ETH_TO_WEI as f64), @@ -159,13 +162,13 @@ fn blocks( Ok((block1, block2)) } -pub(crate) fn blocks_and_execution_outputs( - provider_factory: ProviderFactory, +pub(crate) fn blocks_and_execution_outputs( + provider_factory: ProviderFactory, chain_spec: Arc, key_pair: Keypair, ) -> eyre::Result)>> where - DB: reth_db_api::database::Database, + N: ProviderNodeTypes, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -180,13 +183,13 @@ where Ok(vec![(block1, block_output1), (block2, block_output2)]) } -pub(crate) fn blocks_and_execution_outcome( - provider_factory: ProviderFactory, +pub(crate) fn 
blocks_and_execution_outcome( + provider_factory: ProviderFactory, chain_spec: Arc, key_pair: Keypair, ) -> eyre::Result<(Vec, ExecutionOutcome)> where - DB: reth_db_api::database::Database, + N: ProviderNodeTypes, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index c159b90bd5..ec3c880148 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,12 +1,12 @@ use std::fmt::Debug; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, NodeTypesWithEngine}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_tasks::TaskExecutor; -use tokio::sync::mpsc::{Receiver, UnboundedSender}; +use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExEvent, ExExNotification}; +use crate::{ExExEvent, ExExNotifications}; /// Captures the context that an `ExEx` has access to. pub struct ExExContext { @@ -24,19 +24,24 @@ pub struct ExExContext { /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what /// blocks to receive notifications for. pub events: UnboundedSender, - /// Channel to receive [`ExExNotification`]s. + /// Channel to receive [`ExExNotification`](crate::ExExNotification)s. /// /// # Important /// - /// Once a an [`ExExNotification`] is sent over the channel, it is considered delivered by the - /// node. - pub notifications: Receiver, + /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is + /// considered delivered by the node. + pub notifications: ExExNotifications, /// node components pub components: Node, } -impl Debug for ExExContext { +impl Debug for ExExContext +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExContext") .field("head", &self.head) @@ -76,7 +81,10 @@ impl ExExContext { } /// Returns the handle to the payload builder service. - pub fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle { + pub fn payload_builder( + &self, + ) -> &reth_payload_builder::PayloadBuilderHandle<::Engine> + { self.components.payload_builder() } diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 0f222e0eca..5e0e5b215c 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,10 +1,19 @@ -use crate::{ExExEvent, ExExNotification, FinishedExExHeight}; +use crate::{ + BackfillJobFactory, ExExEvent, ExExNotification, FinishedExExHeight, StreamBackfillJob, +}; +use eyre::OptionExt; +use futures::{Stream, StreamExt}; use metrics::Gauge; +use reth_chainspec::Head; +use reth_evm::execute::BlockExecutorProvider; +use reth_exex_types::ExExHead; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::BlockNumber; +use reth_primitives::{BlockNumber, U256}; +use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, + fmt::Debug, future::{poll_fn, Future}, pin::Pin, sync::{ @@ -40,14 +49,12 @@ pub struct ExExHandle { id: String, /// Metrics for an `ExEx`. metrics: ExExMetrics, - /// Channel to send [`ExExNotification`]s to the `ExEx`. sender: PollSender, /// Channel to receive [`ExExEvent`]s from the `ExEx`. receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. 
next_notification_id: usize, - /// The finished block number of the `ExEx`. /// /// If this is `None`, the `ExEx` has not emitted a `FinishedHeight` event. @@ -59,9 +66,16 @@ impl ExExHandle { /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a /// [`Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. - pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { + pub fn new( + id: String, + node_head: Head, + provider: P, + executor: E, + ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); + let notifications = + ExExNotifications { node_head, provider, executor, notifications: notification_rx }; ( Self { @@ -73,7 +87,7 @@ impl ExExHandle { finished_height: None, }, event_tx, - notification_rx, + notifications, ) } @@ -139,6 +153,328 @@ impl ExExHandle { } } +/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. +pub struct ExExNotifications { + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, +} + +impl Debug for ExExNotifications { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExNotifications") + .field("provider", &self.provider) + .field("executor", &self.executor) + .field("notifications", &self.notifications) + .finish() + } +} + +impl ExExNotifications { + /// Creates a new instance of [`ExExNotifications`]. + pub const fn new( + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + ) -> Self { + Self { node_head, provider, executor, notifications } + } + + /// Receives the next value for this receiver. + /// + /// This method returns `None` if the channel has been closed and there are + /// no remaining messages in the channel's buffer. This indicates that no + /// further values can ever be received from this `Receiver`. The channel is + /// closed when all senders have been dropped, or when [`Receiver::close`] is called. + /// + /// # Cancel safety + /// + /// This method is cancel safe. If `recv` is used as the event in a + /// [`tokio::select!`] statement and some other branch + /// completes first, it is guaranteed that no messages were received on this + /// channel. + /// + /// For full documentation, see [`Receiver::recv`]. + #[deprecated(note = "use `ExExNotifications::next` and its `Stream` implementation instead")] + pub async fn recv(&mut self) -> Option { + self.notifications.recv().await + } + + /// Polls to receive the next message on this channel. + /// + /// This method returns: + /// + /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a + /// spurious failure happens. + /// * `Poll::Ready(Some(message))` if a message is available. + /// * `Poll::Ready(None)` if the channel has been closed and all messages sent before it was + /// closed have been received. + /// + /// When the method returns `Poll::Pending`, the `Waker` in the provided + /// `Context` is scheduled to receive a wakeup when a message is sent on any + /// receiver, or when the channel is closed. Note that on multiple calls to + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. + /// + /// If this method returns `Poll::Pending` due to a spurious failure, then + /// the `Waker` will be notified when the situation causing the spurious + /// failure has been resolved. 
Note that receiving such a wakeup does not + /// guarantee that the next call will succeed — it could fail with another + /// spurious failure. + /// + /// For full documentation, see [`Receiver::poll_recv`]. + #[deprecated( + note = "use `ExExNotifications::poll_next` and its `Stream` implementation instead" + )] + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<ExExNotification>> { + self.notifications.poll_recv(cx) + } +} + +impl<P, E> ExExNotifications<P, E> +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + /// Subscribe to notifications with the given head. + /// + /// Notifications will be sent starting from the head, not inclusive. For example, if + /// `head.number == 10`, then the first notification will be with `block.number == 11`. + pub fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead<P, E> { + ExExNotificationsWithHead::new( + self.node_head, + self.provider, + self.executor, + self.notifications, + head, + ) + } +} + +impl<P: Unpin, E: Unpin> Stream for ExExNotifications<P, E> { + type Item = ExExNotification; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + self.get_mut().notifications.poll_recv(cx) + } +} + +/// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that are +/// committed or reverted after the given head. +#[derive(Debug)] +pub struct ExExNotificationsWithHead<P, E> { + node_head: Head, + provider: P, + executor: E, + notifications: Receiver<ExExNotification>, + exex_head: ExExHead, + pending_sync: bool, + /// The backfill job to run before consuming any notifications. + backfill_job: Option<StreamBackfillJob<E, P, Chain>>, + /// Whether we're currently waiting for the node head to catch up to the same height as the + /// ExEx head. + node_head_catchup_in_progress: bool, +} + +impl<P, E> ExExNotificationsWithHead<P, E> +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + /// Creates a new [`ExExNotificationsWithHead`]. + pub const fn new( + node_head: Head, + provider: P, + executor: E, + notifications: Receiver<ExExNotification>, + exex_head: ExExHead, + ) -> Self { + Self { + node_head, + provider, + executor, + notifications, + exex_head, + pending_sync: true, + backfill_job: None, + node_head_catchup_in_progress: false, + } + } + + /// Compares the node head against the ExEx head, and synchronizes them in case of a mismatch. + /// + /// Possible situations are: + /// - ExEx is behind the node head (`exex_head.number < node_head.number`). + /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). + /// Backfill from the node database. + /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). + /// Unwind the ExEx to the first block matching between the ExEx and the node, and then + /// backfill from the node database. + /// - ExEx is at the same block number (`node_head.number == exex_head.number`). + /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). Nothing + /// to do. + /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). + /// Unwind the ExEx to the first block matching between the ExEx and the node, and then + /// backfill from the node database. + /// - ExEx is ahead of the node head (`exex_head.number > node_head.number`). Wait until the + /// node head catches up to the ExEx head, and then repeat the synchronization process.
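Before the synchronization internals, a usage sketch of the new subscription surface: an ExEx that persists its own head calls `with_head` and then simply consumes the stream, receiving backfilled chains first and live notifications afterwards. This is a hypothetical ExEx body; the `reth_exex::ExExHead` re-export path and the genesis head are assumptions, and the node's provider is assumed to satisfy the stream bounds above:

use futures::StreamExt;
use reth_exex::{ExExContext, ExExEvent, ExExHead};
use reth_node_api::FullNodeComponents;
use reth_primitives::BlockNumHash;

async fn resuming_exex<Node: FullNodeComponents>(ctx: ExExContext<Node>) -> eyre::Result<()> {
    // Resume from the last block this ExEx committed (genesis here, purely for illustration).
    let head = ExExHead { block: BlockNumHash { number: 0, hash: Default::default() } };
    let mut notifications = ctx.notifications.with_head(head);

    while let Some(notification) = notifications.next().await {
        // With a head attached, stream items are `eyre::Result<ExExNotification>`.
        if let Some(committed) = notification?.committed_chain() {
            ctx.events.send(ExExEvent::FinishedHeight(committed.tip().number))?;
        }
    }
    Ok(())
}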
+ fn synchronize(&mut self) -> eyre::Result<()> { + debug!(target: "exex::manager", "Synchronizing ExEx head"); + + let backfill_job_factory = + BackfillJobFactory::new(self.executor.clone(), self.provider.clone()); + match self.exex_head.block.number.cmp(&self.node_head.number) { + std::cmp::Ordering::Less => { + // ExEx is behind the node head + + if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { + // ExEx is on the canonical chain + debug!(target: "exex::manager", "ExEx is behind the node head and on the canonical chain"); + + if exex_header.number != self.exex_head.block.number { + eyre::bail!("ExEx head number does not match the hash") + } + + // ExEx is on the canonical chain, start backfill + let backfill = backfill_job_factory + .backfill(self.exex_head.block.number + 1..=self.node_head.number) + .into_stream(); + self.backfill_job = Some(backfill); + } else { + debug!(target: "exex::manager", "ExEx is behind the node head and not on the canonical chain"); + // ExEx is not on the canonical chain, first unwind it and then backfill + + // TODO(alexey): unwind and backfill + self.backfill_job = None; + } + } + #[allow(clippy::branches_sharing_code)] + std::cmp::Ordering::Equal => { + // ExEx is at the same block height as the node head + + if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { + // ExEx is on the canonical chain + debug!(target: "exex::manager", "ExEx is at the same block height as the node head and on the canonical chain"); + + if exex_header.number != self.exex_head.block.number { + eyre::bail!("ExEx head number does not match the hash") + } + + // ExEx is on the canonical chain and the same as the node head, no need to + // backfill + self.backfill_job = None; + } else { + // ExEx is not on the canonical chain, first unwind it and then backfill + debug!(target: "exex::manager", "ExEx is at the same block height as the node head but not on the canonical chain"); + + // TODO(alexey): unwind and backfill + self.backfill_job = None; + } + } + std::cmp::Ordering::Greater => { + debug!(target: "exex::manager", "ExEx is ahead of the node head"); + + // ExEx is ahead of the node head + + // TODO(alexey): wait until the node head is at the same height as the ExEx head + // and then repeat the process above + self.node_head_catchup_in_progress = true; + } + }; + + Ok(()) + } +} + +impl Stream for ExExNotificationsWithHead +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + type Item = eyre::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + if this.pending_sync { + this.synchronize()?; + this.pending_sync = false; + } + + if let Some(backfill_job) = &mut this.backfill_job { + if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)) { + return Poll::Ready(Some(Ok(ExExNotification::ChainCommitted { + new: Arc::new(chain?), + }))) + } + + // Backfill job is done, remove it + this.backfill_job = None; + } + + loop { + let Some(notification) = ready!(this.notifications.poll_recv(cx)) else { + return Poll::Ready(None) + }; + + // 1. Either committed or reverted chain from the notification. + // 2. Block number of the tip of the canonical chain: + // - For committed chain, it's the tip block number. + // - For reverted chain, it's the block number preceding the first block in the chain. 
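// As a concrete instance of rule 2 above (illustrative numbers): committing blocks 11..=13
// yields `tip == 13`, while reverting blocks 8..=10 yields `tip == 7`, i.e. the block that
// preceded the first reverted one (`first().number - 1`).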
+ let (chain, tip) = notification + .committed_chain() + .map(|chain| (chain.clone(), chain.tip().number)) + .or_else(|| { + notification + .reverted_chain() + .map(|chain| (chain.clone(), chain.first().number - 1)) + }) + .unzip(); + + if this.node_head_catchup_in_progress { + // If we are waiting for the node head to catch up to the same height as the ExEx + // head, then we need to check if the ExEx is on the canonical chain. + + // Query the chain from the new notification for the ExEx head block number. + let exex_head_block = chain + .as_ref() + .and_then(|chain| chain.blocks().get(&this.exex_head.block.number)); + + // Compare the hash of the block from the new notification to the ExEx head + // hash. + if let Some((block, tip)) = exex_head_block.zip(tip) { + if block.hash() == this.exex_head.block.hash { + // ExEx is on the canonical chain, proceed with the notification + this.node_head_catchup_in_progress = false; + } else { + // ExEx is not on the canonical chain, synchronize + let tip = + this.provider.sealed_header(tip)?.ok_or_eyre("node head not found")?; + this.node_head = Head::new( + tip.number, + tip.hash(), + tip.difficulty, + U256::MAX, + tip.timestamp, + ); + this.synchronize()?; + } + } + } + + if notification + .committed_chain() + .or_else(|| notification.reverted_chain()) + .map_or(false, |chain| chain.first().number > this.exex_head.block.number) + { + return Poll::Ready(Some(Ok(notification))) + } + } + } +} + /// Metrics for the `ExEx` manager. #[derive(Metrics)] #[metrics(scope = "exex_manager")] @@ -473,13 +809,20 @@ impl Clone for ExExManagerHandle { #[cfg(test)] mod tests { use super::*; - use reth_primitives::{SealedBlockWithSenders, B256}; - use reth_provider::Chain; + use futures::StreamExt; + use reth_db_common::init::init_genesis; + use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_primitives::{Block, BlockNumHash, Header, SealedBlockWithSenders, B256}; + use reth_provider::{ + providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, + BlockWriter, Chain, + }; + use reth_testing_utils::generators::{self, random_block, BlockParams}; #[tokio::test] async fn test_delivers_events() { let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Send an event and check that it's delivered correctly event_tx.send(ExExEvent::FinishedHeight(42)).unwrap(); @@ -489,7 +832,8 @@ mod tests { #[tokio::test] async fn test_has_exexs() { - let (exex_handle_1, _, _) = ExExHandle::new("test_exex_1".to_string()); + let (exex_handle_1, _, _) = + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); assert!(!ExExManager::new(vec![], 0).handle.has_exexs()); @@ -498,7 +842,8 @@ mod tests { #[tokio::test] async fn test_has_capacity() { - let (exex_handle_1, _, _) = ExExHandle::new("test_exex_1".to_string()); + let (exex_handle_1, _, _) = + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); assert!(!ExExManager::new(vec![], 0).handle.has_capacity()); @@ -507,7 +852,7 @@ mod tests { #[test] fn test_push_notification() { - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string()); + let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Create a mock ExExManager and add the exex_handle to it let mut exex_manager = ExExManager::new(vec![exex_handle], 10); @@ -552,7 +897,7 @@ mod tests { #[test] fn test_update_capacity() { - let (exex_handle, _, _) = 
ExExHandle::new("test_exex".to_string()); + let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; @@ -587,7 +932,7 @@ mod tests { #[tokio::test] async fn test_updates_block_height() { let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Check initial block height assert!(exex_handle.finished_height.is_none()); @@ -624,8 +969,10 @@ mod tests { #[tokio::test] async fn test_updates_block_height_lower() { // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = ExExHandle::new("test_exex1".to_string()); - let (exex_handle2, event_tx2, _) = ExExHandle::new("test_exex2".to_string()); + let (exex_handle1, event_tx1, _) = + ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + let (exex_handle2, event_tx2, _) = + ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); // Send events to update the block heights of the two handles, with the second being lower event_tx1.send(ExExEvent::FinishedHeight(42)).unwrap(); @@ -655,8 +1002,10 @@ mod tests { #[tokio::test] async fn test_updates_block_height_greater() { // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = ExExHandle::new("test_exex1".to_string()); - let (exex_handle2, event_tx2, _) = ExExHandle::new("test_exex2".to_string()); + let (exex_handle1, event_tx1, _) = + ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + let (exex_handle2, event_tx2, _) = + ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); // Assert that the initial block height is `None` for the first `ExExHandle`. 
assert!(exex_handle1.finished_height.is_none()); @@ -692,7 +1041,8 @@ mod tests { #[tokio::test] async fn test_exex_manager_capacity() { - let (exex_handle_1, _, _) = ExExHandle::new("test_exex_1".to_string()); + let (exex_handle_1, _, _) = + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); // Create an ExExManager with a small max capacity let max_capacity = 2; @@ -730,7 +1080,8 @@ mod tests { #[tokio::test] async fn exex_handle_new() { - let (mut exex_handle, _, mut notification_rx) = ExExHandle::new("test_exex".to_string()); + let (mut exex_handle, _, mut notifications) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Check initial state assert_eq!(exex_handle.id, "test_exex"); @@ -759,7 +1110,7 @@ mod tests { // Send a notification and ensure it's received correctly match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let received_notification = notification_rx.recv().await.unwrap(); + let received_notification = notifications.next().await.unwrap(); assert_eq!(received_notification, notification); } Poll::Pending => panic!("Notification send is pending"), @@ -772,7 +1123,8 @@ mod tests { #[tokio::test] async fn test_notification_if_finished_height_gt_chain_tip() { - let (mut exex_handle, _, mut notification_rx) = ExExHandle::new("test_exex".to_string()); + let (mut exex_handle, _, mut notifications) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(15); @@ -790,9 +1142,17 @@ mod tests { // Send the notification match exex_handle.send(&mut cx, &(22, notification)) { Poll::Ready(Ok(())) => { - // The notification should be skipped, so nothing should be sent. - // Check that the receiver channel is indeed empty - assert!(notification_rx.try_recv().is_err(), "Receiver channel should be empty"); + poll_fn(|cx| { + // The notification should be skipped, so nothing should be sent. 
+ // Check that the receiver channel is indeed empty + assert_eq!( + notifications.poll_next_unpin(cx), + Poll::Pending, + "Receiver channel should be empty" + ); + Poll::Ready(()) + }) + .await; } Poll::Pending | Poll::Ready(Err(_)) => { panic!("Notification should not be pending or fail"); @@ -805,7 +1165,8 @@ mod tests { #[tokio::test] async fn test_sends_chain_reorged_notification() { - let (mut exex_handle, _, mut notification_rx) = ExExHandle::new("test_exex".to_string()); + let (mut exex_handle, _, mut notifications) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); let notification = ExExNotification::ChainReorged { old: Arc::new(Chain::default()), @@ -821,7 +1182,7 @@ mod tests { // Send the notification match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let received_notification = notification_rx.recv().await.unwrap(); + let received_notification = notifications.next().await.unwrap(); assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { @@ -835,7 +1196,8 @@ mod tests { #[tokio::test] async fn test_sends_chain_reverted_notification() { - let (mut exex_handle, _, mut notification_rx) = ExExHandle::new("test_exex".to_string()); + let (mut exex_handle, _, mut notifications) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); let notification = ExExNotification::ChainReverted { old: Arc::new(Chain::default()) }; @@ -848,7 +1210,7 @@ mod tests { // Send the notification match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let received_notification = notification_rx.recv().await.unwrap(); + let received_notification = notifications.next().await.unwrap(); assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { @@ -859,4 +1221,220 @@ mod tests { // Ensure the notification ID was incremented assert_eq!(exex_handle.next_notification_id, 23); } + + #[tokio::test] + async fn exex_notifications_behind_head_canonical() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? 
+ .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory.clone())?; + + let node_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + let provider_rw = provider_factory.provider_rw()?; + provider_rw.insert_block( + node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + )?; + provider_rw.commit()?; + + let node_head = Head { + number: node_head_block.number, + hash: node_head_block.hash(), + ..Default::default() + }; + let exex_head = + ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; + + let notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + ) + .with_head(exex_head); + + // First notification is the backfill of missing blocks from the canonical chain + assert_eq!( + notifications.next().await.transpose()?, + Some(ExExNotification::ChainCommitted { + new: Arc::new( + BackfillJobFactory::new( + notifications.executor.clone(), + notifications.provider.clone() + ) + .backfill(1..=1) + .next() + .ok_or_eyre("failed to backfill")?? + ) + }) + ); + + // Second notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(notification)); + + Ok(()) + } + + #[ignore] + #[tokio::test] + async fn exex_notifications_behind_head_non_canonical() -> eyre::Result<()> { + Ok(()) + } + + #[tokio::test] + async fn exex_notifications_same_head_canonical() -> eyre::Result<()> { + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? 
+ .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let node_head = + Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; + let exex_head = + ExExHead { block: BlockNumHash { number: node_head.number, hash: node_head.hash } }; + + let notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![Block { + header: Header { + parent_hash: node_head.hash, + number: node_head.number + 1, + ..Default::default() + }, + ..Default::default() + } + .seal_slow() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + ) + .with_head(exex_head); + + let new_notification = notifications.next().await.transpose()?; + assert_eq!(new_notification, Some(notification)); + + Ok(()) + } + + #[ignore] + #[tokio::test] + async fn exex_notifications_same_head_non_canonical() -> eyre::Result<()> { + Ok(()) + } + + #[tokio::test] + async fn test_notifications_ahead_of_head() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? + .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let exex_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + + let node_head = + Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; + let exex_head = ExExHead { + block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, + }; + + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx + .send(ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![exex_head_block + .clone() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }) + .await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + ) + .with_head(exex_head); + + // First notification is skipped because the node is catching up with the ExEx + let new_notification = poll_fn(|cx| Poll::Ready(notifications.poll_next_unpin(cx))).await; + assert!(new_notification.is_pending()); + + // Imitate the node catching up with the ExEx by sending a notification for the missing + // block + let notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + exex_head_block.number + 1, + BlockParams { parent: Some(exex_head_block.hash()), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + notifications_tx.send(notification.clone()).await?; + + // Second notification is received because the node caught up with the ExEx + assert_eq!(notifications.next().await.transpose()?, Some(notification)); + + Ok(()) + } } diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index b363823eab..ab1958cc86 100644 --- 
a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -12,13 +12,18 @@ use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; use reth_consensus::test_utils::TestConsensus; -use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_db::{ + test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, + DatabaseEnv, +}; use reth_db_common::init::init_genesis; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; -use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; -use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; +use reth_node_api::{ + FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, +}; use reth_node_builder::{ components::{ Components, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NodeComponentsBuilder, @@ -34,7 +39,7 @@ use reth_node_ethereum::{ use reth_payload_builder::noop::NoopPayloadBuilderService; use reth_primitives::{Head, SealedBlockWithSenders}; use reth_provider::{ - providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, }; use reth_tasks::TaskManager; @@ -71,16 +76,16 @@ pub struct TestExecutorBuilder; impl ExecutorBuilder for TestExecutorBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, { type EVM = EthEvmConfig; type Executor = MockExecutorProvider; async fn build_evm( self, - _ctx: &BuilderContext, + ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(ctx.chain_spec()); let executor = MockExecutorProvider::default(); Ok((evm_config, executor)) @@ -110,13 +115,16 @@ pub struct TestNode; impl NodeTypes for TestNode { type Primitives = (); - type Engine = EthEngineTypes; type ChainSpec = ChainSpec; } +impl NodeTypesWithEngine for TestNode { + type Engine = EthEngineTypes; +} + impl Node for TestNode where - N: FullNodeTypes, + N: FullNodeTypes>, { type ComponentsBuilder = ComponentsBuilder< N, @@ -145,9 +153,12 @@ pub type TmpDB = Arc>; /// boot the testing environment pub type Adapter = NodeAdapter< RethFullAdapter, - <>>>::ComponentsBuilder as NodeComponentsBuilder< - RethFullAdapter, - >>::Components, + <, + BlockchainProvider>, + >, + >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, >; /// An [`ExExContext`] using the [`Adapter`] type. 
pub type TestExExContext = ExExContext<Adapter>; @@ -158,7 +169,7 @@ pub struct TestExExHandle { /// Genesis block that was inserted into the storage pub genesis: SealedBlockWithSenders, /// Provider Factory for accessing the ephemeral storage of the host node - pub provider_factory: ProviderFactory<TmpDB>, + pub provider_factory: ProviderFactory<NodeTypesWithDBAdapter<TestNode, TmpDB>>, /// Channel for receiving events from the Execution Extension pub events_rx: UnboundedReceiver<ExExEvent>, /// Channel for sending notifications to the Execution Extension @@ -227,12 +238,19 @@ pub async fn test_exex_context_with_chain_spec( chain_spec: Arc<ChainSpec>, ) -> eyre::Result<(ExExContext<Adapter>, TestExExHandle)> { let transaction_pool = testing_pool(); - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(chain_spec.clone()); let executor = MockExecutorProvider::default(); let consensus = Arc::new(TestConsensus::default()); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - let genesis_hash = init_genesis(provider_factory.clone())?; + let (static_dir, _) = create_test_static_files_dir(); + let db = create_test_rw_db(); + let provider_factory = ProviderFactory::new( + db, + chain_spec, + StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), + ); + + let genesis_hash = init_genesis(&provider_factory)?; let provider = BlockchainProvider::new(provider_factory.clone(), Arc::new(NoopBlockchainTree::default()))?; @@ -248,7 +266,7 @@ pub async fn test_exex_context_with_chain_spec( let tasks = TaskManager::current(); let task_executor = tasks.executor(); - let components = NodeAdapter::<RethFullAdapter<TmpDB, TestNode>, _> { + let components = NodeAdapter::<FullNodeTypesAdapter<NodeTypesWithDBAdapter<TestNode, TmpDB>, _>, _> { components: Components { transaction_pool, evm_config, @@ -278,13 +296,19 @@ pub async fn test_exex_context_with_chain_spec( let (events_tx, events_rx) = tokio::sync::mpsc::unbounded_channel(); let (notifications_tx, notifications_rx) = tokio::sync::mpsc::channel(1); + let notifications = ExExNotifications::new( + head, + components.provider.clone(), + components.components.executor.clone(), + notifications_rx, + ); let ctx = ExExContext { head, config: NodeConfig::test(), reth_config: reth_config::Config::default(), events: events_tx, - notifications: notifications_rx, + notifications, components, }; diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index a70bcc1dd4..75cd498cd1 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-primitives.workspace = true reth-provider.workspace = true # reth diff --git a/crates/exex/types/src/head.rs b/crates/exex/types/src/head.rs new file mode 100644 index 0000000000..3e67b1eca5 --- /dev/null +++ b/crates/exex/types/src/head.rs @@ -0,0 +1,8 @@ +use reth_primitives::BlockNumHash; + +/// A head of the ExEx. It determines the highest block committed to the internal ExEx state. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ExExHead { + /// The head block.
+ pub block: BlockNumHash, +} diff --git a/crates/exex/types/src/lib.rs b/crates/exex/types/src/lib.rs index 3c0ca731f2..8e71fbc619 100644 --- a/crates/exex/types/src/lib.rs +++ b/crates/exex/types/src/lib.rs @@ -9,7 +9,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod finished_height; +mod head; mod notification; pub use finished_height::FinishedExExHeight; +pub use head::ExExHead; pub use notification::ExExNotification; diff --git a/crates/metrics/metrics-derive/tests/metrics.rs b/crates/metrics/metrics-derive/tests/metrics.rs index 5c79b0a3d3..a07ccc8a7c 100644 --- a/crates/metrics/metrics-derive/tests/metrics.rs +++ b/crates/metrics/metrics-derive/tests/metrics.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] use metrics::{ Counter, Gauge, Histogram, Key, KeyName, Label, Metadata, Recorder, SharedString, Unit, }; diff --git a/crates/metrics/metrics-derive/tests/trybuild.rs b/crates/metrics/metrics-derive/tests/trybuild.rs index 9ab8d32e10..3b06a67250 100644 --- a/crates/metrics/metrics-derive/tests/trybuild.rs +++ b/crates/metrics/metrics-derive/tests/trybuild.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] #[test] fn compile_test() { let t = trybuild::TestCases::new(); diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 5b0044a721..f231603f40 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -148,9 +148,11 @@ impl Discv5 { /// Returns the [`NodeRecord`] of the local node. /// /// This includes the currently tracked external IP address of the node. - pub fn node_record(&self) -> NodeRecord { + /// + /// Returns `None` if the local ENR does not contain the required fields. + pub fn node_record(&self) -> Option { let enr: Enr<_> = EnrCombinedKeyWrapper(self.discv5.local_enr()).into(); - (&enr).try_into().unwrap() + enr.try_into().ok() } /// Spawns [`discv5::Discv5`]. 
Returns [`discv5::Discv5`] handle in reth compatible wrapper type diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index d46f09ea12..b37f13da88 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -606,7 +606,9 @@ mod tests { use reth_consensus::test_utils::TestConsensus; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; use reth_primitives::{BlockBody, B256}; - use reth_provider::{providers::StaticFileProvider, ProviderFactory}; + use reth_provider::{ + providers::StaticFileProvider, test_utils::MockNodeTypesWithDB, ProviderFactory, + }; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; use std::collections::HashMap; @@ -628,7 +630,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), @@ -683,7 +685,7 @@ mod tests { BodiesDownloaderBuilder::default().with_request_limit(request_limit).build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), @@ -717,7 +719,7 @@ mod tests { .build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), @@ -753,7 +755,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().with_stream_batch_size(100).build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), @@ -799,7 +801,7 @@ mod tests { .build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), @@ -836,7 +838,7 @@ mod tests { .build( client.clone(), Arc::new(TestConsensus::default()), - ProviderFactory::new( + ProviderFactory::::new( db, MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index fd0bb09016..3776886dbc 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -381,14 +381,14 @@ mod test { .unwrap(), }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope let mut receipt = Receipt { tx_type: TxType::Legacy, success: true, cumulative_gas_used: 202819, ..Default::default() }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be brought into scope receipt.logs = vec![log_1, log_2, log_3]; ReceiptWithBlockNumber { receipt, number: 1 } @@ -433,14 +433,14 @@ mod test { .unwrap(), }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope let mut receipt = Receipt { tx_type: TxType::Legacy, success: true, cumulative_gas_used: 116237, ..Default::default() }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be 
brought into scope receipt.logs = vec![log_1, log_2]; ReceiptWithBlockNumber { receipt, number: 2 } @@ -485,14 +485,14 @@ mod test { .unwrap(), }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope let mut receipt = Receipt { tx_type: TxType::Legacy, success: true, cumulative_gas_used: 116237, ..Default::default() }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be brought into scope receipt.logs = vec![log_1, log_2]; ReceiptWithBlockNumber { receipt, number: 3 } diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 0569da05f9..9706ea86d8 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -364,7 +364,7 @@ mod tests { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, - gas_limit: 0x2e248u64, + gas_limit: 0x2e248, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), @@ -379,7 +379,7 @@ mod tests { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, - gas_limit: 0x33450u64, + gas_limit: 0x33450, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), @@ -439,7 +439,7 @@ mod tests { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, - gas_limit: 0x2e248u64, + gas_limit: 0x2e248, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), @@ -455,7 +455,7 @@ mod tests { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, - gas_limit: 0x33450u64, + gas_limit: 0x33450, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index b4c7bc8b2b..53291b1ae0 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -77,6 +77,7 @@ impl FromIterator for PooledTransactions { mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_rlp::{Decodable, Encodable}; + use reth_chainspec::MIN_TRANSACTION_GAS; use reth_primitives::{ hex, PooledTransactionsElement, Signature, Transaction, TransactionSigned, TxEip1559, TxKind, TxLegacy, U256, @@ -127,7 +128,7 @@ mod tests { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, - gas_limit: 0x2e248u64, + gas_limit: 0x2e248, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), @@ -149,7 +150,7 @@ mod tests { chain_id: Some(1), nonce: 0x09u64, gas_price: 0x4a817c809, - gas_limit: 0x33450u64, + gas_limit: 0x33450, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), @@ -193,7 +194,7 @@ mod tests { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, - gas_limit: 0x2e248u64, + gas_limit: 0x2e248, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), @@ -215,7 +216,7 @@ mod tests { chain_id: Some(1), nonce: 0x09u64, gas_price: 0x4a817c809, - gas_limit: 0x33450u64, + gas_limit: 0x33450, to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), 
value: U256::from(0x2d9u64), input: Default::default(), @@ -260,7 +261,7 @@ mod tests { chain_id: Some(4), nonce: 15u64, gas_price: 2200000000, - gas_limit: 34811u64, + gas_limit: 34811, to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), @@ -283,7 +284,7 @@ mod tests { nonce: 26u64, max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, - gas_limit: 21000u64, + gas_limit: MIN_TRANSACTION_GAS as u128, to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), @@ -306,7 +307,7 @@ mod tests { chain_id: Some(4), nonce: 3u64, gas_price: 2000000000, - gas_limit: 10000000u64, + gas_limit: 10000000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), @@ -328,7 +329,7 @@ mod tests { chain_id: Some(4), nonce: 1u64, gas_price: 1000000000, - gas_limit: 100000u64, + gas_limit: 100000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), @@ -350,7 +351,7 @@ mod tests { chain_id: Some(4), nonce: 2u64, gas_price: 1000000000, - gas_limit: 100000u64, + gas_limit: 100000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), @@ -399,7 +400,7 @@ mod tests { chain_id: Some(4), nonce: 15u64, gas_price: 2200000000, - gas_limit: 34811u64, + gas_limit: 34811, to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), @@ -422,7 +423,7 @@ mod tests { nonce: 26u64, max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, - gas_limit: 21000u64, + gas_limit: MIN_TRANSACTION_GAS as u128, to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), @@ -445,7 +446,7 @@ mod tests { chain_id: Some(4), nonce: 3u64, gas_price: 2000000000, - gas_limit: 10000000u64, + gas_limit: 10000000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), @@ -467,7 +468,7 @@ mod tests { chain_id: Some(4), nonce: 1u64, gas_price: 1000000000, - gas_limit: 100000u64, + gas_limit: 100000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), @@ -489,7 +490,7 @@ mod tests { chain_id: Some(4), nonce: 2u64, gas_price: 1000000000, - gas_limit: 100000u64, + gas_limit: 100000, to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), diff --git a/crates/net/nat/Cargo.toml b/crates/net/nat/Cargo.toml index 8ab78d46a5..99f2096167 100644 --- a/crates/net/nat/Cargo.toml +++ b/crates/net/nat/Cargo.toml @@ -17,6 +17,7 @@ reqwest.workspace = true serde_with = { workspace = true, optional = true } thiserror.workspace = true tokio = { workspace = true, features = ["time"] } +if-addrs.workspace = true [dev-dependencies] reth-tracing.workspace = true diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 8f75790895..e58edae05c 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -12,6 +12,10 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, 
feature(doc_cfg, doc_auto_cfg))] +pub mod net_if; + +pub use net_if::{NetInterfaceError, DEFAULT_NET_IF_NAME}; + use std::{ fmt, future::{poll_fn, Future}, diff --git a/crates/net/nat/src/net_if.rs b/crates/net/nat/src/net_if.rs new file mode 100644 index 0000000000..d93dd8dd58 --- /dev/null +++ b/crates/net/nat/src/net_if.rs @@ -0,0 +1,55 @@ +//! IP resolution on non-host Docker network. + +#![cfg(not(target_os = "windows"))] + +use std::{io, net::IpAddr}; + +/// The 'eth0' interface tends to be the default interface that docker containers use to +/// communicate with each other. +pub const DEFAULT_NET_IF_NAME: &str = "eth0"; + +/// Errors resolving network interface IP. +#[derive(Debug, thiserror::Error)] +pub enum NetInterfaceError { + /// Error reading OS interfaces. + #[error("failed to read OS interfaces: {0}")] + Io(io::Error), + /// No interface found with given name. + #[error("interface not found: {0}, found other interfaces: {1:?}")] + IFNotFound(String, Vec), +} + +/// Reads IP of OS interface with given name, if exists. +#[cfg(not(target_os = "windows"))] +pub fn resolve_net_if_ip(if_name: &str) -> Result { + match if_addrs::get_if_addrs() { + Ok(ifs) => { + let ip = ifs.iter().find(|i| i.name == if_name).map(|i| i.ip()); + match ip { + Some(ip) => Ok(ip), + None => { + let ifs = ifs.into_iter().map(|i| i.name.as_str().into()).collect(); + Err(NetInterfaceError::IFNotFound(if_name.into(), ifs)) + } + } + } + Err(err) => Err(NetInterfaceError::Io(err)), + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv4Addr; + + use super::*; + + #[test] + fn read_docker_if_addr() { + const LOCALHOST_IF: [&str; 2] = ["lo0", "lo"]; + + let ip = resolve_net_if_ip(LOCALHOST_IF[0]) + .unwrap_or_else(|_| resolve_net_if_ip(LOCALHOST_IF[1]).unwrap()); + + assert_eq!(ip, Ipv4Addr::LOCALHOST); + } +} diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index f9f1a4da3c..fb2daca666 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -5,7 +5,6 @@ use std::{fmt, hash::Hash}; use derive_more::{Deref, DerefMut}; use itertools::Itertools; -// use linked_hash_set::LinkedHashSet; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; /// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity. diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index feabeed873..d366027d68 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -196,6 +196,11 @@ impl Discovery { } } + /// Returns discv5 handle. + pub fn discv5(&self) -> Option { + self.discv5.clone() + } + /// Add a node to the discv4 table. 
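A usage sketch for the interface resolver added above; the surrounding function and fallback policy are hypothetical, only `resolve_net_if_ip` and `DEFAULT_NET_IF_NAME` come from this diff:

// Advertise the container's IP on the default Docker interface ("eth0"),
// falling back to a caller-provided address when the interface is absent.
use std::net::IpAddr;

use reth_net_nat::net_if::{resolve_net_if_ip, DEFAULT_NET_IF_NAME};

fn external_ip_or(fallback: IpAddr) -> IpAddr {
    resolve_net_if_ip(DEFAULT_NET_IF_NAME).unwrap_or(fallback)
}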
pub(crate) fn add_discv5_node(&self, enr: Enr) -> Result<(), NetworkError> { if let Some(discv5) = &self.discv5 { diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 936c8c6daf..8ed4321cb0 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -233,6 +233,7 @@ impl NetworkManager { // need to retrieve the addr here since provided port could be `0` let local_peer_id = discovery.local_id(); let discv4 = discovery.discv4(); + let discv5 = discovery.discv5(); let num_active_peers = Arc::new(AtomicUsize::new(0)); @@ -274,6 +275,7 @@ impl NetworkManager { Arc::new(AtomicU64::new(chain_spec.chain.id())), tx_gossip_disabled, discv4, + discv5, event_sender.clone(), ); diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index db7b1cdc83..d099812e6c 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -9,6 +9,7 @@ use std::{ use enr::Enr; use parking_lot::Mutex; use reth_discv4::Discv4; +use reth_discv5::Discv5; use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; use reth_network_api::{ events::EngineMessage, @@ -63,6 +64,7 @@ impl NetworkHandle { chain_id: Arc, tx_gossip_disabled: bool, discv4: Option, + discv5: Option, event_sender: EventSender, ) -> Self { let inner = NetworkInner { @@ -79,6 +81,7 @@ impl NetworkHandle { chain_id, tx_gossip_disabled, discv4, + discv5, event_sender, }; Self { inner: Arc::new(inner) } @@ -221,6 +224,8 @@ impl PeersInfo for NetworkHandle { fn local_node_record(&self) -> NodeRecord { if let Some(discv4) = &self.inner.discv4 { discv4.node_record() + } else if let Some(record) = self.inner.discv5.as_ref().and_then(|d| d.node_record()) { + record } else { let id = *self.peer_id(); let mut socket_addr = *self.inner.listener_address.lock(); @@ -442,6 +447,8 @@ struct NetworkInner { tx_gossip_disabled: bool, /// The instance of the discv4 service discv4: Option, + /// The instance of the discv5 service + discv5: Option, /// Sender for high level network events. event_sender: EventSender, } diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index 3dd77514da..b8023ca792 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -6,7 +6,8 @@ use super::{ SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }; use crate::transactions::constants::tx_fetcher::{ - DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, + DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; /// Configuration for managing transactions within the network. @@ -46,6 +47,11 @@ pub struct TransactionFetcherConfig { /// [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on packing a /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request with hashes. pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, + /// Max capacity of the cache of transaction hashes, for transactions that weren't yet fetched. + /// A transaction is pending fetch if its hash didn't fit into a + /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) yet, or it wasn't returned + /// upon request to peers. 
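To illustrate the new knob in context, an override might look like the sketch below; the numeric value is arbitrary and the import path is assumed from the crate layout, with all other limits left at their defaults:

// Hypothetical: shrink the pending-fetch hash cache, keep other limits default.
use reth_network::transactions::config::TransactionFetcherConfig;

fn custom_fetcher_config() -> TransactionFetcherConfig {
    TransactionFetcherConfig {
        max_capacity_cache_txns_pending_fetch: 4096,
        ..Default::default()
    }
}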
+ pub max_capacity_cache_txns_pending_fetch: u32, } impl Default for TransactionFetcherConfig { @@ -56,7 +62,8 @@ impl Default for TransactionFetcherConfig { soft_limit_byte_size_pooled_transactions_response: SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, soft_limit_byte_size_pooled_transactions_response_on_pack_request: - DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ + DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, + max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, } } } diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 8a5b2fbadb..f553cfbfdb 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -134,6 +134,8 @@ impl TransactionFetcher { .metrics .capacity_inflight_requests .increment(tx_fetcher.info.max_inflight_requests as u64); + tx_fetcher.info.max_capacity_cache_txns_pending_fetch = + config.max_capacity_cache_txns_pending_fetch; tx_fetcher } @@ -1291,6 +1293,10 @@ pub struct TransactionFetcherInfo { /// Soft limit for the byte size of a [`PooledTransactions`] response, upon assembling the /// response. Spec'd at 2 MiB, but can be adjusted for research purpose. pub soft_limit_byte_size_pooled_transactions_response: usize, + /// Max capacity of the cache of transaction hashes, for transactions that weren't yet fetched. + /// A transaction is pending fetch if its hash didn't fit into a [`GetPooledTransactions`] yet, + /// or it wasn't returned upon request to peers. + pub max_capacity_cache_txns_pending_fetch: u32, } impl TransactionFetcherInfo { @@ -1299,11 +1305,13 @@ impl TransactionFetcherInfo { max_inflight_requests: usize, soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, soft_limit_byte_size_pooled_transactions_response: usize, + max_capacity_cache_txns_pending_fetch: u32, ) -> Self { Self { max_inflight_requests, soft_limit_byte_size_pooled_transactions_response_on_pack_request, soft_limit_byte_size_pooled_transactions_response, + max_capacity_cache_txns_pending_fetch, } } } @@ -1313,7 +1321,8 @@ impl Default for TransactionFetcherInfo { Self::new( DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize * DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER as usize, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE + SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, + DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, ) } } diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index c23f93e0ec..090e60c7a3 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] mod big_pooled_txs_req; mod connect; mod multiplex; diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 285e83af2e..a52b9ccda9 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,7 +1,7 @@ use std::ops::RangeInclusive; use super::headers::client::HeadersRequest; -use derive_more::Display; +use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; @@ -76,7 +76,7 @@ impl EthResponseValidator for RequestResult> { /// Error variants that can happen when sending requests to a session. 
/// /// Represents errors encountered when sending requests. -#[derive(Clone, Debug, Eq, PartialEq, Display)] +#[derive(Clone, Debug, Eq, PartialEq, Display, Error)] pub enum RequestError { /// Closed channel to the peer. #[display("closed channel to the peer")] @@ -126,14 +126,11 @@ impl From for RequestError { } } -#[cfg(feature = "std")] -impl std::error::Error for RequestError {} - /// The download result type pub type DownloadResult = Result; /// The downloader error type -#[derive(Debug, Clone, PartialEq, Eq, Display)] +#[derive(Debug, Clone, PartialEq, Eq, Display, Error)] pub enum DownloadError { /* ==================== HEADER ERRORS ==================== */ /// Header validation failed. @@ -144,6 +141,7 @@ pub enum DownloadError { /// Number of header failing validation number: u64, /// The details of validation failure + #[error(source)] error: Box, }, /// Received an invalid tip. @@ -216,20 +214,6 @@ impl From for DownloadError { } } -#[cfg(feature = "std")] -impl std::error::Error for DownloadError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::HeaderValidation { error, .. } | Self::BodyValidation { error, .. } => { - std::error::Error::source(error) - } - Self::RequestError(error) => std::error::Error::source(error), - Self::Provider(error) => std::error::Error::source(error), - _ => None, - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index 3112f9d7d4..b22aae9248 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -1,4 +1,4 @@ -use derive_more::Display; +use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; @@ -6,7 +6,7 @@ use reth_primitives::SealedHeader; pub type HeadersDownloaderResult = Result; /// Error variants that can happen when sending requests to a session. -#[derive(Debug, Clone, Eq, PartialEq, Display)] +#[derive(Debug, Clone, Eq, PartialEq, Display, Error)] pub enum HeadersDownloaderError { /// The downloaded header cannot be attached to the local head, /// but is valid otherwise. @@ -17,15 +17,7 @@ pub enum HeadersDownloaderError { /// The header we attempted to attach. header: Box, /// The error that occurred when attempting to attach the header. + #[error(source)] error: Box, }, } - -#[cfg(feature = "std")] -impl std::error::Error for HeadersDownloaderError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::DetachedHead { error, .. 
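The same `derive_more` pattern in miniature, for reference (a standalone example, not code from this diff): the `Error` derive replaces the hand-written `std::error::Error` impls being deleted here, and `#[error(source)]` marks the field surfaced through `source()`:

use derive_more::{Display, Error};

#[derive(Debug, Display, Error)]
#[display("validation failed at block {number}")]
struct ValidationError {
    number: u64,
    // Returned by `std::error::Error::source()` thanks to the attribute.
    #[error(source)]
    error: std::io::Error,
}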
} => Some(error), - } - } -} diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index cd29e12d4c..d6836d8819 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -196,6 +196,15 @@ impl FromStr for NodeRecord { } } +#[cfg(feature = "secp256k1")] +impl TryFrom> for NodeRecord { + type Error = NodeRecordParseError; + + fn try_from(enr: Enr) -> Result { + (&enr).try_into() + } +} + #[cfg(feature = "secp256k1")] impl TryFrom<&Enr> for NodeRecord { type Error = NodeRecordParseError; diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index e9f297fc44..e7685acc84 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -14,8 +14,6 @@ workspace = true # reth reth-evm.workspace = true reth-provider.workspace = true -reth-chainspec.workspace = true -reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true reth-payload-builder.workspace = true @@ -23,3 +21,5 @@ reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-rpc-eth-api.workspace = true reth-network-api.workspace = true +reth-node-types.workspace = true +reth-primitives.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 8748fc79d9..7692ed6f2c 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -17,14 +17,13 @@ pub use reth_payload_primitives as payload; pub use reth_payload_primitives::*; /// Traits and helper types used to abstract over EVM methods and types. -pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; - -pub mod primitives; +pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; pub mod node; pub use node::*; // re-export for convenience +pub use reth_node_types::*; pub use reth_provider::FullProvider; pub use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 50e87f98b7..ce6b16c8ff 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -2,124 +2,44 @@ use std::marker::PhantomData; -use reth_chainspec::{ChainSpec, EthChainSpec}; -use reth_db_api::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, -}; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; +use reth_node_types::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; +use reth_primitives::Header; use reth_provider::FullProvider; use reth_rpc_eth_api::EthApiTypes; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; -use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; +use crate::ConfigureEvm; -/// The type that configures the essential types of an ethereum like node. -/// -/// This includes the primitive types of a node, the engine API types for communication with the -/// consensus layer. -/// -/// This trait is intended to be stateless and only define the types of the node. -pub trait NodeTypes: Send + Sync + Unpin + 'static { - /// The node's primitive types, defining basic operations and structures. - type Primitives: NodePrimitives; - /// The node's engine types, defining the interaction with the consensus engine. - type Engine: EngineTypes; - /// The type used for configuration of the EVM. - type ChainSpec: EthChainSpec; -} - -/// A [`NodeTypes`] type builder -#[derive(Default, Debug)] -pub struct AnyNodeTypes

<P = (), E = (), C = ChainSpec>(PhantomData<P>, PhantomData<E>, PhantomData<C>); - -impl<P, E, C> AnyNodeTypes<P, E, C> { - /// Sets the `Primitives` associated type. - pub const fn primitives<T>(self) -> AnyNodeTypes<T, E, C> { - AnyNodeTypes::<T, E, C>(PhantomData::<T>, PhantomData::<E>, PhantomData::<C>) - } - - /// Sets the `Engine` associated type. - pub const fn engine<T>(self) -> AnyNodeTypes<P, T, C> { - AnyNodeTypes::<P, T, C>(PhantomData::<P>, PhantomData::<T>, PhantomData::<C>) - } -} - -impl<P, E, C> NodeTypes for AnyNodeTypes<P, E, C> -where - P: NodePrimitives + Send + Sync + Unpin + 'static, - E: EngineTypes + Send + Sync + Unpin, - C: EthChainSpec, -{ - type Primitives = P; - - type Engine = E; - - type ChainSpec = C; -} - -/// A helper trait that is downstream of the [`NodeTypes`] trait and adds stateful components to the -/// node. +/// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful +/// components to the node. /// /// Its types are configured by node internally and are not intended to be user configurable. -pub trait FullNodeTypes: NodeTypes + 'static { - /// Underlying database type used by the node to store and retrieve data. - type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; +pub trait FullNodeTypes: Send + Sync + Unpin + 'static { + /// Node's types with the database. + type Types: NodeTypesWithDB + NodeTypesWithEngine; /// The provider type used to interact with the node. - type Provider: FullProvider<Self::DB, Self::ChainSpec>; + type Provider: FullProvider<Self::Types>; } /// An adapter type that adds the builtin provider type to the user configured node types. #[derive(Debug)] -pub struct FullNodeTypesAdapter<Types, DB, Provider> { +pub struct FullNodeTypesAdapter<Types, Provider> { /// An instance of the user configured node types. pub types: PhantomData<Types>, - /// The database type used by the node. - pub db: PhantomData<DB>, /// The provider type used by the node. pub provider: PhantomData<Provider>, } -impl<Types, DB, Provider> FullNodeTypesAdapter<Types, DB, Provider> { - /// Create a new adapter with the configured types. - pub fn new() -> Self { - Self { types: Default::default(), db: Default::default(), provider: Default::default() } - } -} - -impl<Types, DB, Provider> Default for FullNodeTypesAdapter<Types, DB, Provider> { - fn default() -> Self { - Self::new() - } -} - -impl<Types, DB, Provider> Clone for FullNodeTypesAdapter<Types, DB, Provider> { - fn clone(&self) -> Self { - Self { types: self.types, db: self.db, provider: self.provider } - } -} - -impl<Types, DB, Provider> NodeTypes for FullNodeTypesAdapter<Types, DB, Provider> -where - Types: NodeTypes, - DB: Send + Sync + Unpin + 'static, - Provider: Send + Sync + Unpin + 'static, -{ - type Primitives = Types::Primitives; - type Engine = Types::Engine; - type ChainSpec = Types::ChainSpec; -} - -impl<Types, DB, Provider> FullNodeTypes for FullNodeTypesAdapter<Types, DB, Provider> +impl<Types, Provider> FullNodeTypes for FullNodeTypesAdapter<Types, Provider> where - Types: NodeTypes, - Provider: FullProvider<DB, Types::ChainSpec>, - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + Types: NodeTypesWithDB + NodeTypesWithEngine, + Provider: FullProvider<Types>, { - type DB = DB; + type Types = Types; type Provider = Provider; } @@ -129,7 +49,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Pool: TransactionPool + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm; + type Evm: ConfigureEvm<Header = Header>
; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; @@ -153,7 +73,9 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &PayloadBuilderHandle; + fn payload_builder( + &self, + ) -> &PayloadBuilderHandle<::Engine>; /// Returns handle to runtime. fn task_executor(&self) -> &TaskExecutor; diff --git a/crates/node/api/src/primitives.rs b/crates/node/api/src/primitives.rs deleted file mode 100644 index 235be5ff7b..0000000000 --- a/crates/node/api/src/primitives.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Type abstraction for node primitive types. - -/// Configures all the primitive types of the node. -// TODO(mattsse): this is currently a placeholder -pub trait NodePrimitives {} - -// TODO(mattsse): Placeholder -impl NodePrimitives for () {} diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index c95cb0105c..34dbef2ced 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,50 +13,54 @@ workspace = true [dependencies] ## reth -reth-chainspec.workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true -reth-db-common.workspace = true -reth-exex.workspace = true -reth-evm.workspace = true -reth-provider.workspace = true +reth-chainspec.workspace = true +reth-cli-util.workspace = true +reth-config.workspace = true +reth-consensus-debug-client.workspace = true +reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"], optional = true } reth-db-api.workspace = true -reth-rpc-engine-api.workspace = true -reth-rpc.workspace = true -reth-rpc-builder.workspace = true -reth-rpc-layer.workspace = true +reth-db-common.workspace = true +reth-downloaders.workspace = true +reth-engine-service.workspace = true +reth-engine-tree.workspace = true +reth-engine-util.workspace = true +reth-evm.workspace = true +reth-exex.workspace = true +reth-fs-util.workspace = true +reth-invalid-block-hooks.workspace = true +reth-network-api.workspace = true +reth-network-p2p.workspace = true +reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true +reth-node-events.workspace = true reth-node-metrics.workspace = true -reth-network.workspace = true -reth-primitives.workspace = true reth-payload-builder.workspace = true -reth-transaction-pool.workspace = true -reth-tasks.workspace = true -reth-tracing.workspace = true -reth-network-p2p.workspace = true -reth-static-file.workspace = true +reth-payload-validator.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true reth-prune.workspace = true -reth-stages.workspace = true -reth-config.workspace = true -reth-downloaders.workspace = true -reth-node-events.workspace = true -reth-consensus.workspace = true -reth-consensus-debug-client.workspace = true -reth-rpc-types.workspace = true -reth-engine-util.workspace = true -reth-cli-util.workspace = true +reth-rpc-api.workspace = true +reth-rpc-builder.workspace = true +reth-rpc-engine-api.workspace = true reth-rpc-eth-types.workspace = true -reth-network-api.workspace = true -reth-payload-validator.workspace = true -reth-engine-service.workspace = true +reth-rpc-layer.workspace = true +reth-rpc-types.workspace = true +reth-rpc.workspace = true +reth-stages.workspace = true +reth-static-file.workspace = true +reth-tasks.workspace = true 
reth-tokio-util.workspace = true -reth-engine-tree.workspace = true +reth-tracing.workspace = true +reth-transaction-pool.workspace = true ## ethereum alloy-network.workspace = true +alloy-primitives.workspace = true ## async futures.workspace = true @@ -79,6 +83,7 @@ secp256k1 = { workspace = true, features = [ aquamarine.workspace = true eyre.workspace = true fdlimit.workspace = true +jsonrpsee.workspace = true rayon.workspace = true ## tracing diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 156ee05ae0..472bc425aa 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -21,13 +21,19 @@ use reth_exex::ExExContext; use reth_network::{ NetworkBuilder, NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; -use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes}; +use reth_node_api::{ + FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, + NodeTypesWithEngine, +}; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, primitives::Head, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, + rpc::{ + eth::{helpers::AddDevSigners, FullEthApiServer}, + types::AnyTransactionReceipt, + }, }; use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; @@ -46,8 +52,12 @@ use crate::{ /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. -pub type RethFullAdapter = FullNodeTypesAdapter>; +pub type RethFullAdapter = FullNodeTypesAdapter< + NodeTypesWithDBAdapter, + BlockchainProvider>, +>; +#[allow(clippy::doc_markdown)] #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. /// @@ -57,8 +67,8 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter(self) -> NodeBuilderWithTypes> where - T: NodeTypes, + T: NodeTypesWithEngine, { self.with_types_and_provider() } @@ -215,10 +225,10 @@ where /// Configures the types of the node and the provider type that will be used by the node. pub fn with_types_and_provider( self, - ) -> NodeBuilderWithTypes> + ) -> NodeBuilderWithTypes, P>> where - T: NodeTypes, - P: FullProvider, + T: NodeTypesWithEngine, + P: FullProvider>, { NodeBuilderWithTypes::new(self.config, self.database) } @@ -265,7 +275,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypes, + T: NodeTypesWithEngine, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -273,10 +283,12 @@ where /// Configures the types of the node and the provider type that will be used by the node. 
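The bounds above are what the type-level split looks like from a node implementer's perspective; mirroring the `TestNode` change earlier in this diff, a custom node now implements the two traits separately (`MyNode` is hypothetical):

use reth_chainspec::ChainSpec;
use reth_node_api::{NodeTypes, NodeTypesWithEngine};
use reth_node_ethereum::EthEngineTypes;

#[derive(Clone, Debug, Default)]
struct MyNode;

// Stateless primitives and chain-spec configuration.
impl NodeTypes for MyNode {
    type Primitives = ();
    type ChainSpec = ChainSpec;
}

// Engine (consensus-layer API) types layered on top.
impl NodeTypesWithEngine for MyNode {
    type Engine = EthEngineTypes;
}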
pub fn with_types_and_provider( self, - ) -> WithLaunchContext>> + ) -> WithLaunchContext< + NodeBuilderWithTypes, P>>, + > where - T: NodeTypes, - P: FullProvider, + T: NodeTypesWithEngine, + P: FullProvider>, { WithLaunchContext { builder: self.builder.with_types_and_provider(), @@ -332,6 +344,7 @@ where + FullEthApiServer< NetworkTypes: alloy_network::Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + AddDevSigners @@ -474,7 +487,7 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, + T: NodeTypesWithEngine, CB: NodeComponentsBuilder>, AO: NodeAddOns< NodeAdapter, CB::Components>, @@ -482,6 +495,7 @@ where + FullEthApiServer< NetworkTypes: alloy_network::Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + AddDevSigners, >, @@ -548,7 +562,7 @@ impl BuilderContext { } /// Returns the chain spec of the node. - pub fn chain_spec(&self) -> Arc { + pub fn chain_spec(&self) -> Arc<::ChainSpec> { self.provider().chain_spec() } diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index f6915a3db3..b5c99d082b 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -8,7 +8,9 @@ use std::{fmt, future::Future, marker::PhantomData}; use reth_exex::ExExContext; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes}; +use reth_node_api::{ + FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypesWithDB, NodeTypesWithEngine, +}; use reth_node_core::{ node_config::NodeConfig, rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, @@ -34,7 +36,7 @@ pub struct NodeBuilderWithTypes { impl NodeBuilderWithTypes { /// Creates a new instance of the node builder with the given configuration and types. - pub const fn new(config: NodeConfig, database: T::DB) -> Self { + pub const fn new(config: NodeConfig, database: ::DB) -> Self { Self { config, adapter: NodeTypesAdapter::new(database) } } @@ -61,12 +63,12 @@ impl NodeBuilderWithTypes { /// Container for the node's types and the database the node uses. pub struct NodeTypesAdapter { /// The database type used by the node. - pub database: T::DB, + pub database: ::DB, } impl NodeTypesAdapter { /// Create a new adapter from the given node types. 
- pub(crate) const fn new(database: T::DB) -> Self { + pub(crate) const fn new(database: ::DB) -> Self { Self { database } } } @@ -88,14 +90,8 @@ pub struct NodeAdapter> { pub provider: T::Provider, } -impl> NodeTypes for NodeAdapter { - type Primitives = T::Primitives; - type Engine = T::Engine; - type ChainSpec = T::ChainSpec; -} - impl> FullNodeTypes for NodeAdapter { - type DB = T::DB; + type Types = T::Types; type Provider = T::Provider; } @@ -125,7 +121,7 @@ impl> FullNodeComponents for NodeAdapter< self.components.network() } - fn payload_builder(&self) -> &PayloadBuilderHandle { + fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine> { self.components.payload_builder() } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 650ab24abc..ef9f303a74 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -4,6 +4,7 @@ use std::{future::Future, marker::PhantomData}; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; +use reth_primitives::Header; use reth_transaction_pool::TransactionPool; use crate::{ @@ -371,7 +372,7 @@ where F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm, + EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, { diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 891f8e01fe..90cff588f7 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -2,6 +2,7 @@ use crate::{BuilderContext, FullNodeTypes}; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; +use reth_primitives::Header; use std::future::Future; /// A type that knows how to build the executor types. @@ -9,7 +10,7 @@ pub trait ExecutorBuilder: Send { /// The EVM config to use. /// /// This provides the node with the necessary configuration to configure an EVM. - type EVM: ConfigureEvm; + type EVM: ConfigureEvm
; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; @@ -24,7 +25,7 @@ pub trait ExecutorBuilder: Send { impl ExecutorBuilder for F where Node: FullNodeTypes, - EVM: ConfigureEvm, + EVM: ConfigureEvm
, Executor: BlockExecutorProvider, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index e2d35c4706..42001fc100 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -25,7 +25,9 @@ use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; +use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; +use reth_primitives::Header; use reth_transaction_pool::TransactionPool; use crate::{ConfigureEvm, FullNodeTypes}; @@ -35,12 +37,12 @@ use crate::{ConfigureEvm, FullNodeTypes}; /// - transaction pool /// - network /// - payload builder. -pub trait NodeComponents: Clone + Unpin + Send + Sync + 'static { +pub trait NodeComponents: Clone + Unpin + Send + Sync + 'static { /// The transaction pool of the node. type Pool: TransactionPool + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm; + type Evm: ConfigureEvm
; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; @@ -67,7 +69,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &PayloadBuilderHandle; + fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; } /// All the components of the node. @@ -86,7 +88,7 @@ pub struct Components { /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. - pub payload_builder: PayloadBuilderHandle, + pub payload_builder: PayloadBuilderHandle<::Engine>, } impl NodeComponents @@ -94,7 +96,7 @@ impl NodeComponents where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm, + EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, { @@ -124,7 +126,9 @@ where &self.network } - fn payload_builder(&self) -> &PayloadBuilderHandle { + fn payload_builder( + &self, + ) -> &PayloadBuilderHandle<::Engine> { &self.payload_builder } } @@ -133,7 +137,7 @@ impl Clone for Components, Executor: BlockExecutorProvider, Cons: Consensus + Clone, { diff --git a/crates/node/builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs index 55c7669efd..0efad9ba5c 100644 --- a/crates/node/builder/src/components/payload.rs +++ b/crates/node/builder/src/components/payload.rs @@ -2,6 +2,7 @@ use std::future::Future; +use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; @@ -16,7 +17,9 @@ pub trait PayloadServiceBuilder: Sen self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future>> + Send; + ) -> impl Future< + Output = eyre::Result::Engine>>, + > + Send; } impl PayloadServiceBuilder for F @@ -24,13 +27,19 @@ where Node: FullNodeTypes, Pool: TransactionPool, F: Fn(&BuilderContext, Pool) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future< + Output = eyre::Result< + PayloadBuilderHandle<::Engine>, + >, + > + Send, { fn spawn_payload_service( self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future>> + Send { + ) -> impl Future< + Output = eyre::Result::Engine>>, + > + Send { self(ctx, pool) } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 5ae8286f09..ee5ce5968e 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1,8 +1,9 @@ //! Helper types that can be used by launchers. -use std::{marker::PhantomData, sync::Arc, thread::available_parallelism}; +use std::{sync::Arc, thread::available_parallelism}; -use eyre::Context; +use alloy_primitives::{BlockNumber, B256}; +use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; @@ -12,13 +13,17 @@ use reth_blockchain_tree::{ use reth_chainspec::{Chain, ChainSpec}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; -use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; +use reth_db_api::database::Database; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::noop::NoopBlockExecutorProvider; +use reth_fs_util as fs; +use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::FullNodeTypes; +use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB}; use reth_node_core::{ + args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, version::{ @@ -27,17 +32,20 @@ use reth_node_core::{ }, }; use reth_node_metrics::{ + chain::ChainSpecInfo, hooks::Hooks, server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; -use reth_primitives::{BlockNumber, Head, B256}; +use reth_primitives::Head; use reth_provider::{ providers::{BlockchainProvider, BlockchainProvider2, StaticFileProvider}, - BlockHashReader, CanonStateNotificationSender, FullProvider, ProviderFactory, ProviderResult, - StageCheckpointReader, StaticFileProviderFactory, TreeViewer, + BlockHashReader, 
CanonStateNotificationSender, ChainSpecProvider, ProviderFactory, + ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, + TreeViewer, }; use reth_prune::{PruneModes, PrunerBuilder}; +use reth_rpc_api::clients::EthApiClient; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; use reth_stages::{sets::DefaultStages, MetricEvent, Pipeline, PipelineTarget, StageId}; @@ -64,13 +72,13 @@ pub trait WithTree { fn set_tree(self, tree: Arc) -> Self; } -impl WithTree for BlockchainProvider { +impl WithTree for BlockchainProvider { fn set_tree(self, tree: Arc) -> Self { self.with_tree(tree) } } -impl WithTree for BlockchainProvider2 { +impl WithTree for BlockchainProvider2 { fn set_tree(self, _tree: Arc) -> Self { self } @@ -343,8 +351,16 @@ impl LaunchContextWith> { } /// Returns the configured [`PruneConfig`] + /// Any configuration set in CLI will take precedence over those set in toml pub fn prune_config(&self) -> Option { - self.toml_config().prune.clone().or_else(|| self.node_config().prune_config()) + let Some(mut node_prune_config) = self.node_config().prune_config() else { + // No CLI config is set, use the toml config. + return self.toml_config().prune.clone(); + }; + + // Otherwise, use the CLI configuration and merge with toml config. + node_prune_config.merge(self.toml_config().prune.clone()); + Some(node_prune_config) } /// Returns the configured [`PruneModes`], returning the default if no config was available. @@ -385,7 +401,9 @@ where /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** - pub async fn create_provider_factory(&self) -> eyre::Result> { + pub async fn create_provider_factory>( + &self, + ) -> eyre::Result> { let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), @@ -412,7 +430,7 @@ where let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); // Builds an unwind-only pipeline - let pipeline = Pipeline::builder() + let pipeline = Pipeline::::builder() .add_stages(DefaultStages::new( factory.clone(), tip_rx, @@ -447,9 +465,9 @@ where } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. - pub async fn with_provider_factory( + pub async fn with_provider_factory>( self, - ) -> eyre::Result>>> { + ) -> eyre::Result>>> { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { inner: self.inner, @@ -460,17 +478,17 @@ where } } -impl LaunchContextWith>> +impl LaunchContextWith>> where - DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, + T: NodeTypesWithDB, { /// Returns access to the underlying database. - pub fn database(&self) -> &DB { + pub const fn database(&self) -> &T::DB { self.right().db_ref() } /// Returns the configured `ProviderFactory`. 
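The precedence implemented in `prune_config` above, restated as a freestanding sketch; the `merge` signature is assumed from its use here, and only the ordering is the point:

use reth_config::PruneConfig;

fn effective_prune_config(
    cli: Option<PruneConfig>,
    toml: Option<PruneConfig>,
) -> Option<PruneConfig> {
    // No CLI pruning flags: fall back to reth.toml wholesale.
    let Some(mut cfg) = cli else { return toml };
    // CLI wins; fields the CLI left unset are back-filled from reth.toml.
    cfg.merge(toml);
    Some(cfg)
}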
- pub const fn provider_factory(&self) -> &ProviderFactory { + pub const fn provider_factory(&self) -> &ProviderFactory { self.right() } @@ -502,6 +520,7 @@ where target_triple: VERGEN_CARGO_TARGET_TRIPLE, build_profile: BUILD_PROFILE_NAME, }, + ChainSpecInfo { name: self.left().config.chain.chain.to_string() }, self.task_executor().clone(), Hooks::new(self.database().clone(), self.static_file_provider()), ); @@ -514,13 +533,13 @@ where /// Convenience function to [`Self::init_genesis`] pub fn with_genesis(self) -> Result { - init_genesis(self.provider_factory().clone())?; + init_genesis(self.provider_factory())?; Ok(self) } /// Write the genesis block and state if it has not already been written pub fn init_genesis(&self) -> Result { - init_genesis(self.provider_factory().clone()) + init_genesis(self.provider_factory()) } /// Creates a new `WithMeteredProvider` container and attaches it to the @@ -530,7 +549,7 @@ where /// prometheus. pub fn with_metrics_task( self, - ) -> LaunchContextWith>> { + ) -> LaunchContextWith>> { let (metrics_sender, metrics_receiver) = unbounded_channel(); let with_metrics = @@ -547,12 +566,12 @@ where } } -impl LaunchContextWith>> +impl LaunchContextWith>> where - DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, + N: NodeTypesWithDB, { /// Returns the configured `ProviderFactory`. - const fn provider_factory(&self) -> &ProviderFactory { + const fn provider_factory(&self) -> &ProviderFactory { &self.right().provider_factory } @@ -567,11 +586,10 @@ where create_blockchain_provider: F, tree_config: BlockchainTreeConfig, canon_state_notification_sender: CanonStateNotificationSender, - ) -> eyre::Result>>> + ) -> eyre::Result>>> where - T: FullNodeTypes, - T::Provider: FullProvider, - F: FnOnce(ProviderFactory) -> eyre::Result, + T: FullNodeTypes, + F: FnOnce(ProviderFactory) -> eyre::Result, { let blockchain_db = create_blockchain_provider(self.provider_factory().clone())?; @@ -583,8 +601,6 @@ where blockchain_db, tree_config, canon_state_notification_sender, - // we store here a reference to T. - phantom_data: PhantomData, }; let ctx = LaunchContextWith { @@ -596,18 +612,17 @@ where } } -impl LaunchContextWith>> +impl LaunchContextWith>> where - DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes + WithTree>, + T: FullNodeTypes, Provider: WithTree>, { /// Returns access to the underlying database. - pub fn database(&self) -> &DB { + pub const fn database(&self) -> &::DB { self.provider_factory().db_ref() } /// Returns the configured `ProviderFactory`. - pub const fn provider_factory(&self) -> &ProviderFactory { + pub const fn provider_factory(&self) -> &ProviderFactory { &self.right().db_provider_container.provider_factory } @@ -616,7 +631,7 @@ where /// If the database is empty, returns the genesis block. 
pub fn lookup_head(&self) -> eyre::Result { self.node_config() - .lookup_head(self.provider_factory().clone()) + .lookup_head(self.provider_factory()) .wrap_err("the head block is missing") } @@ -647,7 +662,7 @@ where on_component_initialized: Box< dyn OnComponentInitializedHook>, >, - ) -> eyre::Result>>> + ) -> eyre::Result>>> where CB: NodeComponentsBuilder, { @@ -667,19 +682,17 @@ where let consensus: Arc = Arc::new(components.consensus().clone()); let tree_externals = TreeExternals::new( - self.provider_factory().clone(), + self.provider_factory().clone().with_prune_modes(self.prune_modes()), consensus.clone(), components.block_executor().clone(), ); - let mut tree = - BlockchainTree::new(tree_externals, *self.tree_config(), self.prune_modes())? - .with_sync_metrics_tx(self.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. This will be - // removed once the Blockchain provider no longer depends on an - // instance of the tree - .with_canon_state_notification_sender(self.canon_state_notification_sender()); + let mut tree = BlockchainTree::new(tree_externals, *self.tree_config())? + .with_sync_metrics_tx(self.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(self.canon_state_notification_sender()); if self.node_config().enable_prefetch { tree = tree.enable_prefetch(); @@ -726,14 +739,13 @@ where } } -impl LaunchContextWith>> +impl LaunchContextWith>> where - DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes + WithTree>, + T: FullNodeTypes>, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. - pub const fn provider_factory(&self) -> &ProviderFactory { + pub const fn provider_factory(&self) -> &ProviderFactory { &self.right().db_provider_container.provider_factory } @@ -752,7 +764,7 @@ where } /// Creates a new [`StaticFileProducer`] with the attached database. - pub fn static_file_producer(&self) -> StaticFileProducer { + pub fn static_file_producer(&self) -> StaticFileProducer> { StaticFileProducer::new(self.provider_factory().clone(), self.prune_modes()) } @@ -825,7 +837,7 @@ where inconsistent_stage_checkpoint = stage_checkpoint, "Pipeline sync progress is inconsistent" ); - return self.blockchain_db().block_hash(first_stage_checkpoint) + return self.blockchain_db().block_hash(first_stage_checkpoint); } } @@ -853,6 +865,75 @@ where } } +impl LaunchContextWith>> +where + T: FullNodeTypes< + Provider: WithTree + StateProviderFactory + ChainSpecProvider, + Types: NodeTypes, + >, + CB: NodeComponentsBuilder, +{ + /// Returns the [`InvalidBlockHook`] to use for the node. 
+ pub fn invalid_block_hook(&self) -> eyre::Result> { + let Some(ref hook) = self.node_config().debug.invalid_block_hook else { + return Ok(Box::new(NoopInvalidBlockHook::default())) + }; + let healthy_node_rpc_client = self.get_healthy_node_client()?; + + let output_directory = self.data_dir().invalid_block_hooks(); + let hooks = hook + .iter() + .copied() + .map(|hook| { + let output_directory = output_directory.join(hook.to_string()); + fs::create_dir_all(&output_directory)?; + + Ok(match hook { + InvalidBlockHookType::Witness => Box::new(InvalidBlockWitnessHook::new( + self.blockchain_db().clone(), + self.components().evm_config().clone(), + output_directory, + healthy_node_rpc_client.clone(), + )), + InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { + eyre::bail!("invalid block hook {hook:?} is not implemented yet") + } + } as Box) + }) + .collect::>()?; + + Ok(Box::new(InvalidBlockHooks(hooks))) + } + + /// Returns an RPC client for the healthy node, if configured in the node config. + fn get_healthy_node_client(&self) -> eyre::Result> { + self.node_config() + .debug + .healthy_node_rpc_url + .as_ref() + .map(|url| { + let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; + + // Verify that the healthy node is running the same chain as the current node. + let chain_id = futures::executor::block_on(async { + EthApiClient::< + reth_rpc_types::Transaction, + reth_rpc_types::Block, + reth_rpc_types::Receipt, + >::chain_id(&client) + .await + })? + .ok_or_eyre("healthy node rpc client didn't return a chain id")?; + if chain_id.to::() != self.chain_id().id() { + eyre::bail!("invalid chain id for healthy node: {chain_id}") + } + + Ok(client) + }) + .transpose() + } +} + /// Joins two attachments together. #[derive(Clone, Copy, Debug)] pub struct Attached { @@ -916,37 +997,32 @@ pub struct WithConfigs { /// Helper container type to bundle the [`ProviderFactory`] and the metrics /// sender. #[derive(Debug, Clone)] -pub struct WithMeteredProvider { - provider_factory: ProviderFactory, +pub struct WithMeteredProvider { + provider_factory: ProviderFactory, metrics_sender: UnboundedSender, } /// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] /// and a metrics sender. #[allow(missing_debug_implementations)] -pub struct WithMeteredProviders +pub struct WithMeteredProviders where - DB: Database, - T: FullNodeTypes>, + T: FullNodeTypes, { - db_provider_container: WithMeteredProvider, + db_provider_container: WithMeteredProvider, blockchain_db: T::Provider, canon_state_notification_sender: CanonStateNotificationSender, tree_config: BlockchainTreeConfig, - // this field is used to store a reference to the FullNodeTypes so that we - // can build the components in `with_components` method. - phantom_data: PhantomData, } /// Helper container to bundle the metered providers container and [`NodeAdapter`]. 
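The healthy-node check above exists so the invalid-block hooks never diff against a node on a different network. A self-contained sketch of the same validation, with the jsonrpsee call replaced by a closure so the example runs standalone:

```rust
// Hedged sketch: `fetch_remote_chain_id` stands in for the `chain_id` RPC
// call made through the configured --debug.healthy-node-rpc-url client.
fn verify_remote_chain(
    fetch_remote_chain_id: impl Fn() -> Result<Option<u64>, String>,
    local_chain_id: u64,
) -> Result<(), String> {
    // A missing chain id is an error, mirroring the `ok_or_eyre` above.
    let remote = fetch_remote_chain_id()?
        .ok_or_else(|| "healthy node rpc client didn't return a chain id".to_string())?;
    if remote != local_chain_id {
        return Err(format!("invalid chain id for healthy node: {remote}"));
    }
    Ok(())
}

fn main() {
    assert!(verify_remote_chain(|| Ok(Some(1)), 1).is_ok());
    assert!(verify_remote_chain(|| Ok(Some(10)), 1).is_err());
    assert!(verify_remote_chain(|| Ok(None), 1).is_err());
}
```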
#[allow(missing_debug_implementations)] -pub struct WithComponents +pub struct WithComponents where - DB: Database, - T: FullNodeTypes>, + T: FullNodeTypes, CB: NodeComponentsBuilder, { - db_provider_container: WithMeteredProvider, + db_provider_container: WithMeteredProvider, tree_config: BlockchainTreeConfig, blockchain_db: T::Provider, node_adapter: NodeAdapter, @@ -973,8 +1049,29 @@ mod tests { fn test_save_prune_config() { with_tempdir("prune-store-test", |config_path| { let mut reth_config = Config::default(); - let node_config = - NodeConfig { pruning: PruningArgs { full: true }, ..NodeConfig::test() }; + let node_config = NodeConfig { + pruning: PruningArgs { + full: true, + block_interval: 0, + sender_recovery_full: false, + sender_recovery_distance: None, + sender_recovery_before: None, + transaction_lookup_full: false, + transaction_lookup_distance: None, + transaction_lookup_before: None, + receipts_full: false, + receipts_distance: None, + receipts_before: None, + account_history_full: false, + account_history_distance: None, + account_history_before: None, + storage_history_full: false, + storage_history_distance: None, + storage_history_before: None, + receipts_log_filter: vec![], + }, + ..NodeConfig::test() + }; LaunchContext::save_pruning_config_if_full_node( &mut reth_config, &node_config, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index db1e1745ef..0ecd689c3d 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -9,6 +9,7 @@ use reth_blockchain_tree::BlockchainTreeConfig; #[cfg(feature = "bsc")] use reth_bsc_engine::ParliaEngineBuilder; use reth_chainspec::ChainSpec; +use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -20,12 +21,17 @@ use reth_network::{NetworkSyncUpdater, SyncState}; #[cfg(feature = "bsc")] use reth_network_api::EngineRxProvider; use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{BuiltPayload, FullNodeTypes, NodeAddOns}; +use reth_node_api::{ + BuiltPayload, FullNodeTypes, NodeAddOns, NodeTypesWithDB, NodeTypesWithEngine, +}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, primitives::Head, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, + rpc::{ + eth::{helpers::AddDevSigners, FullEthApiServer}, + types::AnyTransactionReceipt, + }, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; @@ -37,6 +43,7 @@ use reth_rpc_types::{engine::ClientVersionV1, WithOtherFields}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; +use std::sync::Arc; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -53,21 +60,27 @@ use crate::{ pub struct EngineNodeLauncher { /// The task executor for the node. pub ctx: LaunchContext, + + /// Temporary configuration for engine tree. + /// After engine is stabilized, this should be configured through node builder. + pub engine_tree_config: TreeConfig, } impl EngineNodeLauncher { /// Create a new instance of the ethereum node launcher. 
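The updated test above spells out every `PruningArgs` field. Since the struct keeps its `Default` derive (see the pruning.rs hunk later in this diff), struct-update syntax would express the same value more tersely; the explicit form is a defensible style choice because it fails loudly when fields are added. A stand-in sketch of the equivalence:

```rust
// `Args` is an illustrative stand-in for a CLI struct deriving `Default`.
#[derive(Debug, Default, PartialEq)]
struct Args {
    full: bool,
    block_interval: u64,
    receipts_distance: Option<u64>,
}

fn main() {
    // Spelling out every field and using functional update syntax
    // produce the same value.
    let explicit = Args { full: true, block_interval: 0, receipts_distance: None };
    let via_default = Args { full: true, ..Default::default() };
    assert_eq!(explicit, via_default);
}
```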
- pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { - Self { ctx: LaunchContext::new(task_executor, data_dir) } + pub const fn new( + task_executor: TaskExecutor, + data_dir: ChainPath, + engine_tree_config: TreeConfig, + ) -> Self { + Self { ctx: LaunchContext::new(task_executor, data_dir), engine_tree_config } } } -impl LaunchNode> for EngineNodeLauncher +impl LaunchNode> for EngineNodeLauncher where - T: FullNodeTypes< - Provider = BlockchainProvider2<::DB>, - ChainSpec = ChainSpec, - >, + Types: NodeTypesWithDB + NodeTypesWithEngine, + T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: NodeAddOns< NodeAdapter, @@ -75,6 +88,7 @@ where + FullEthApiServer< NetworkTypes: alloy_network::Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + AddDevSigners, >, @@ -85,7 +99,7 @@ where self, target: NodeBuilderWithComponents, ) -> eyre::Result { - let Self { ctx } = self; + let Self { ctx, engine_tree_config } = self; let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, @@ -225,7 +239,9 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), - TreeConfig::default(), + engine_tree_config, + ctx.invalid_block_hook()?, + ctx.sync_metrics_tx(), ); eth_service } @@ -254,7 +270,9 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), - TreeConfig::default(), + engine_tree_config, + ctx.invalid_block_hook()?, + ctx.sync_metrics_tx(), ); eth_service } @@ -302,6 +320,7 @@ where ctx.chain_spec(), beacon_engine_handle, ctx.components().payload_builder().clone().into(), + ctx.components().pool().clone(), Box::new(ctx.task_executor().clone()), client, EngineCapabilities::default(), @@ -321,6 +340,36 @@ where ) .await?; + // TODO: migrate to devmode with https://github.com/paradigmxyz/reth/issues/10104 + if let Some(maybe_custom_etherscan_url) = ctx.node_config().debug.etherscan.clone() { + info!(target: "reth::cli", "Using etherscan as consensus client"); + + let chain = ctx.node_config().chain.chain; + let etherscan_url = maybe_custom_etherscan_url.map(Ok).unwrap_or_else(|| { + // If URL isn't provided, use default Etherscan URL for the chain if it is known + chain + .etherscan_urls() + .map(|urls| urls.0.to_string()) + .ok_or_else(|| eyre::eyre!("failed to get etherscan url for chain: {chain}")) + })?; + + let block_provider = EtherscanBlockProvider::new( + etherscan_url, + chain.etherscan_api_key().ok_or_else(|| { + eyre::eyre!( + "etherscan api key not found for rpc consensus client for chain: {chain}" + ) + })?, + ); + let rpc_consensus_client = DebugConsensusClient::new( + rpc_server_handles.auth.clone(), + Arc::new(block_provider), + ); + ctx.task_executor().spawn_critical("etherscan consensus client", async move { + rpc_consensus_client.run::<::Engine>().await + }); + } + // Run consensus engine to completion let initial_target = ctx.initial_backfill_target()?; let network_handle = ctx.components().network().clone(); diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index 86bb14ecf5..c3f842e5df 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -48,7 +48,12 @@ impl ExExLauncher { for (id, exex) in extensions { // create a new exex handle - let (handle, events, notifications) = ExExHandle::new(id.clone()); + let (handle, events, notifications) = ExExHandle::new( + id.clone(), + head, + components.provider().clone(), + 
components.block_executor().clone(), + ); exex_handles.push(handle); // create the launch context for the exex diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index fedb31ffe0..cfe5e92118 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -25,11 +25,16 @@ use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; #[cfg(feature = "bsc")] use reth_network_api::EngineRxProvider; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns}; +use reth_node_api::{ + FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypesWithDB, NodeTypesWithEngine, +}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, - rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, + rpc::{ + eth::{helpers::AddDevSigners, FullEthApiServer}, + types::AnyTransactionReceipt, + }, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; @@ -106,12 +111,10 @@ impl DefaultNodeLauncher { } } -impl LaunchNode> for DefaultNodeLauncher +impl LaunchNode> for DefaultNodeLauncher where - T: FullNodeTypes< - Provider = BlockchainProvider<::DB>, - ChainSpec = ChainSpec, - >, + Types: NodeTypesWithDB + NodeTypesWithEngine, + T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: NodeAddOns< NodeAdapter, @@ -119,6 +122,7 @@ where + FullEthApiServer< NetworkTypes: alloy_network::Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + AddDevSigners, >, @@ -373,6 +377,7 @@ where ctx.chain_spec(), beacon_engine_handle, ctx.components().payload_builder().clone().into(), + ctx.components().pool().clone(), Box::new(ctx.task_executor().clone()), client, EngineCapabilities::default(), @@ -430,7 +435,7 @@ where Arc::new(block_provider), ); ctx.task_executor().spawn_critical("etherscan consensus client", async move { - rpc_consensus_client.run::().await + rpc_consensus_client.run::().await }); } @@ -443,7 +448,7 @@ where Arc::new(block_provider), ); ctx.task_executor().spawn_critical("rpc consensus client", async move { - rpc_consensus_client.run::().await + rpc_consensus_client.run::().await }); } diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index e9191f04a6..cfe16074a5 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -28,6 +28,9 @@ pub use builder::{ mod launch; pub use launch::{engine::EngineNodeLauncher, *}; +/// Temporarily re-export engine tree config. +pub use reth_engine_tree::tree::config as engine_tree_config; + mod handle; pub use handle::NodeHandle; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 4b54b09cdf..912046643c 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,9 +1,9 @@ // re-export the node api types -pub use reth_node_api::{FullNodeTypes, NodeTypes}; +pub use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}; use std::{marker::PhantomData, sync::Arc}; -use reth_node_api::FullNodeComponents; +use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, @@ -20,10 +20,10 @@ use crate::{ NodeAdapter, NodeAddOns, }; -/// A [`crate::Node`] is a [`NodeTypes`] that comes with preconfigured components. +/// A [`crate::Node`] is a [`NodeTypesWithEngine`] that comes with preconfigured components. 
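The Etherscan consensus-client block above resolves its URL in two steps: an explicit `--debug.etherscan <URL>` override wins, otherwise the chain's known endpoint is used, otherwise startup fails. A runnable sketch of just that resolution step (function name and error strings are illustrative):

```rust
// Mirrors the `map(Ok).unwrap_or_else(..)` shape used above: `custom` is the
// optional CLI override, `chain_default` the chain's built-in endpoint.
fn resolve_etherscan_url(
    custom: Option<String>,
    chain_default: Option<&str>,
    chain: &str,
) -> Result<String, String> {
    custom.map(Ok).unwrap_or_else(|| {
        chain_default
            .map(str::to_string)
            .ok_or_else(|| format!("failed to get etherscan url for chain: {chain}"))
    })
}

fn main() {
    let custom =
        resolve_etherscan_url(Some("https://example.org".into()), None, "devnet").unwrap();
    assert_eq!(custom, "https://example.org");

    let fallback =
        resolve_etherscan_url(None, Some("https://api.etherscan.io"), "mainnet").unwrap();
    assert_eq!(fallback, "https://api.etherscan.io");

    assert!(resolve_etherscan_url(None, None, "devnet").is_err());
}
```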
/// /// This can be used to configure the builder with a preset of components. -pub trait Node: NodeTypes + Clone { +pub trait Node: NodeTypesWithEngine + Clone { /// The type that builds the node's components. type ComponentsBuilder: NodeComponentsBuilder; @@ -58,11 +58,18 @@ where C: Send + Sync + Unpin + 'static, AO: Send + Sync + Unpin + Clone + 'static, { - type Primitives = N::Primitives; + type Primitives = ::Primitives; - type Engine = N::Engine; + type ChainSpec = ::ChainSpec; +} - type ChainSpec = N::ChainSpec; +impl NodeTypesWithEngine for AnyNode +where + N: FullNodeTypes, + C: Send + Sync + Unpin + 'static, + AO: Send + Sync + Unpin + Clone + 'static, +{ + type Engine = ::Engine; } impl Node for AnyNode @@ -82,7 +89,7 @@ where /// The launched node with all components including RPC handlers. /// /// This can be used to interact with the launched node. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct FullNode> { /// The evm configuration. pub evm_config: Node::Evm, @@ -95,7 +102,7 @@ pub struct FullNode> { /// Provider to interact with the node's database pub provider: Node::Provider, /// Handle to the node's payload builder service. - pub payload_builder: PayloadBuilderHandle, + pub payload_builder: PayloadBuilderHandle<::Engine>, /// Task executor for the node. pub task_executor: TaskExecutor, /// Handles to the node's rpc servers @@ -108,13 +115,32 @@ pub struct FullNode> { pub data_dir: ChainPath, } -impl FullNode +impl> Clone for FullNode { + fn clone(&self) -> Self { + Self { + evm_config: self.evm_config.clone(), + block_executor: self.block_executor.clone(), + pool: self.pool.clone(), + network: self.network.clone(), + provider: self.provider.clone(), + payload_builder: self.payload_builder.clone(), + task_executor: self.task_executor.clone(), + rpc_server_handles: self.rpc_server_handles.clone(), + rpc_registry: self.rpc_registry.clone(), + config: self.config.clone(), + data_dir: self.data_dir.clone(), + } + } +} + +impl FullNode where - Node: FullNodeComponents, + Engine: EngineTypes, + Node: FullNodeComponents>, AddOns: NodeAddOns, { /// Returns the chain spec of the node. - pub fn chain_spec(&self) -> Arc { + pub fn chain_spec(&self) -> Arc<::ChainSpec> { self.provider.chain_spec() } @@ -131,14 +157,14 @@ where /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated http requests to the node's auth server. - pub fn engine_http_client(&self) -> impl EngineApiClient { + pub fn engine_http_client(&self) -> impl EngineApiClient { self.auth_server_handle().http_client() } /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated ws requests to the node's auth server. - pub async fn engine_ws_client(&self) -> impl EngineApiClient { + pub async fn engine_ws_client(&self) -> impl EngineApiClient { self.auth_server_handle().ws_client().await } @@ -146,7 +172,7 @@ where /// /// This will send not authenticated IPC requests to the node's auth server. 
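`FullNode` above trades `#[derive(Debug, Clone)]` for a hand-written `Clone`: the derive would bound the `Node` and `AddOns` type parameters themselves, even though only the stored field types need to be cloneable, and node marker types never implement `Clone`. A minimal reproduction of the problem and the fix (stand-in traits and types):

```rust
// Stand-in reproduction: `derive(Clone)` on a generic struct bounds the
// type parameter itself, which marker types never satisfy.
trait Components {
    type Pool;
}

struct Handle<N: Components> {
    pool: N::Pool,
}

// Bounding the impl on what is actually stored (the field types) instead
// of on `N` keeps `Handle<EthNode>` cloneable even though `EthNode` is not.
impl<N: Components> Clone for Handle<N>
where
    N::Pool: Clone,
{
    fn clone(&self) -> Self {
        Self { pool: self.pool.clone() }
    }
}

struct EthNode; // deliberately not `Clone`

impl Components for EthNode {
    type Pool = std::sync::Arc<Vec<u8>>;
}

fn main() {
    let handle = Handle::<EthNode> { pool: std::sync::Arc::new(vec![1, 2, 3]) };
    let copy = handle.clone();
    assert_eq!(copy.pool, handle.pool);
}
```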
#[cfg(unix)] - pub async fn engine_ipc_client(&self) -> Option> { + pub async fn engine_ipc_client(&self) -> Option> { self.auth_server_handle().ipc_client().await } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 141bca1f92..64ce0b8ffa 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -6,12 +6,14 @@ use std::{ }; use futures::TryFutureExt; -use reth_node_api::{BuilderProvider, FullNodeComponents}; +use reth_chainspec::ChainSpec; +use reth_node_api::{BuilderProvider, FullNodeComponents, NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::{ node_config::NodeConfig, rpc::{ api::EngineApiServer, eth::{EthApiTypes, FullEthApiServer}, + types::AnyTransactionReceipt, }, }; use reth_payload_builder::PayloadBuilderHandle; @@ -283,7 +285,9 @@ where } /// Returns the handle to the payload builder service - pub fn payload_builder(&self) -> &PayloadBuilderHandle { + pub fn payload_builder( + &self, + ) -> &PayloadBuilderHandle<::Engine> { self.node.payload_builder() } } @@ -297,12 +301,13 @@ pub async fn launch_rpc_servers( add_ons: RpcAddOns, ) -> eyre::Result<(RethRpcServerHandles, RpcRegistry)> where - Node: FullNodeComponents + Clone, - Engine: EngineApiServer, + Node: FullNodeComponents> + Clone, + Engine: EngineApiServer<::Engine>, EthApi: EthApiBuilderProvider + FullEthApiServer< NetworkTypes: alloy_network::Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, >, { diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index ec239c38db..8ee13e8104 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use reth_config::{config::StageConfig, PruneConfig}; use reth_consensus::Consensus; -use reth_db_api::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -15,7 +14,7 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; use reth_node_core::primitives::{BlockNumber, B256}; -use reth_provider::ProviderFactory; +use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; @@ -24,22 +23,22 @@ use tokio::sync::watch; /// Constructs a [Pipeline] that's wired to the network #[allow(clippy::too_many_arguments)] -pub fn build_networked_pipeline( +pub fn build_networked_pipeline( config: &StageConfig, client: Client, consensus: Arc, - provider_factory: ProviderFactory, + provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, max_block: Option, - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer>, executor: Executor, exex_manager_handle: ExExManagerHandle, skip_state_root_validation: bool, -) -> eyre::Result> +) -> eyre::Result> where - DB: Database + Unpin + Clone + 'static, + N: ProviderNodeTypes, Client: BlockClient + 'static, Executor: BlockExecutorProvider, { @@ -72,8 +71,8 @@ where /// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders. 
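Several signatures above replace a free-standing `Engine` parameter with the projection `<Types as NodeTypesWithEngine>::Engine`. A compact sketch of how such a projection types a handle without widening the struct's generics (illustrative names only):

```rust
// Two trait hops pick out the engine type: node -> types -> engine.
trait NodeTypesWithEngine {
    type Engine;
}

trait FullNodeTypes {
    type Types: NodeTypesWithEngine;
}

struct PayloadBuilderHandle<E> {
    _engine: std::marker::PhantomData<E>,
}

struct Rpc<T: FullNodeTypes> {
    payload_builder: PayloadBuilderHandle<<T::Types as NodeTypesWithEngine>::Engine>,
}

impl<T: FullNodeTypes> Rpc<T> {
    // The fully qualified path names the engine without adding another
    // generic parameter to `Rpc` itself.
    fn payload_builder(
        &self,
    ) -> &PayloadBuilderHandle<<T::Types as NodeTypesWithEngine>::Engine> {
        &self.payload_builder
    }
}

struct EthEngine;
struct EthTypes;
impl NodeTypesWithEngine for EthTypes {
    type Engine = EthEngine;
}
struct EthNode;
impl FullNodeTypes for EthNode {
    type Types = EthTypes;
}

fn main() {
    let rpc =
        Rpc::<EthNode> { payload_builder: PayloadBuilderHandle { _engine: Default::default() } };
    let _handle: &PayloadBuilderHandle<EthEngine> = rpc.payload_builder();
}
```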
#[allow(clippy::too_many_arguments)] -pub fn build_pipeline( - provider_factory: ProviderFactory, +pub fn build_pipeline( + provider_factory: ProviderFactory, stage_config: &StageConfig, header_downloader: H, body_downloader: B, @@ -81,18 +80,18 @@ pub fn build_pipeline( max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer>, executor: Executor, exex_manager_handle: ExExManagerHandle, skip_state_root_validation: bool, -) -> eyre::Result> +) -> eyre::Result> where - DB: Database + Clone + 'static, + N: ProviderNodeTypes, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, Executor: BlockExecutorProvider, { - let mut builder = Pipeline::builder(); + let mut builder = Pipeline::::builder(); if let Some(max_block) = max_block { debug!(target: "reth::cli", max_block, "Configuring builder to use max block"); @@ -105,7 +104,7 @@ where let pipeline = builder .with_tip_sender(tip_tx) - .with_metrics_tx(metrics_tx.clone()) + .with_metrics_tx(metrics_tx) .add_stages( DefaultStages::new( provider_factory.clone(), @@ -118,16 +117,13 @@ where prune_modes.clone(), skip_state_root_validation, ) - .set( - ExecutionStage::new( - executor, - stage_config.execution.into(), - stage_config.execution_external_clean_threshold(), - prune_modes, - exex_manager_handle, - ) - .with_metrics_tx(metrics_tx), - ), + .set(ExecutionStage::new( + executor, + stage_config.execution.into(), + stage_config.execution_external_clean_threshold(), + prune_modes, + exex_manager_handle, + )), ) .build(provider_factory, static_file_producer); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 811bed4e31..9ce4f3cfea 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -14,12 +14,12 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true +reth-cli.workspace = true reth-cli-util.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } -reth-db-api.workspace = true reth-storage-errors.workspace = true -reth-provider.workspace = true +reth-storage-api.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true @@ -43,7 +43,8 @@ reth-bsc-chainspec = { workspace = true, optional = true } # ethereum alloy-genesis.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } # misc eyre.workspace = true @@ -54,6 +55,8 @@ rand.workspace = true derive_more.workspace = true toml.workspace = true serde.workspace = true +strum = { workspace = true, features = ["derive"] } +thiserror.workspace = true # io dirs-next = "2.0.0" @@ -73,7 +76,6 @@ secp256k1 = { workspace = true, features = [ # async futures.workspace = true - [dev-dependencies] # test vectors generation proptest.workspace = true @@ -83,7 +85,6 @@ tempfile.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc-eth-api/optimism", "dep:reth-optimism-chainspec", diff --git a/crates/node/core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs index 85adc49a4a..cb0590f177 100644 --- a/crates/node/core/src/args/datadir_args.rs +++ b/crates/node/core/src/args/datadir_args.rs @@ -20,7 +20,12 @@ pub struct DatadirArgs { pub datadir: MaybePlatformPath, /// The 
absolute path to store static files in. - #[arg(long = "datadir.static_files", verbatim_doc_comment, value_name = "PATH")] + #[arg( + long = "datadir.static-files", + alias = "datadir.static_files", + value_name = "PATH", + verbatim_doc_comment + )] pub static_files_path: Option, } diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index 084e5cdc82..83c5c268d7 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -1,11 +1,15 @@ //! clap [Args](clap::Args) for debugging purposes -use clap::Args; -use reth_primitives::B256; -use std::path::PathBuf; +use alloy_primitives::B256; +use clap::{ + builder::{PossibleValue, TypedValueParser}, + Arg, Args, Command, +}; +use std::{collections::HashSet, ffi::OsStr, fmt, path::PathBuf, str::FromStr}; +use strum::{AsRefStr, EnumIter, IntoStaticStr, ParseError, VariantArray, VariantNames}; /// Parameters for debugging purposes -#[derive(Debug, Clone, Args, PartialEq, Eq, Default)] +#[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "Debug")] pub struct DebugArgs { /// Flag indicating whether the node should be terminated after the pipeline sync. @@ -63,6 +67,252 @@ pub struct DebugArgs { /// will be written to specified location. #[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")] pub engine_api_store: Option, + + /// Determines which type of invalid block hook to install + /// + /// Example: `witness,prestate` + #[arg( + long = "debug.invalid-block-hook", + help_heading = "Debug", + value_parser = InvalidBlockSelectionValueParser::default(), + default_value = "witness" + )] + pub invalid_block_hook: Option, + + /// The RPC URL of a healthy node to use for comparing invalid block hook results against. + #[arg( + long = "debug.healthy-node-rpc-url", + help_heading = "Debug", + value_name = "URL", + verbatim_doc_comment + )] + pub healthy_node_rpc_url: Option, +} + +impl Default for DebugArgs { + fn default() -> Self { + Self { + terminate: false, + tip: None, + max_block: None, + etherscan: None, + rpc_consensus_ws: None, + skip_fcu: None, + skip_new_payload: None, + reorg_frequency: None, + reorg_depth: None, + engine_api_store: None, + invalid_block_hook: Some(InvalidBlockSelection::default()), + healthy_node_rpc_url: None, + } + } +} + +/// Describes the invalid block hooks that should be installed. +/// +/// # Example +/// +/// Create a [`InvalidBlockSelection`] from a selection. +/// +/// ``` +/// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection}; +/// let config: InvalidBlockSelection = vec![InvalidBlockHookType::Witness].into(); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, derive_more::Deref)] +pub struct InvalidBlockSelection(HashSet); + +impl Default for InvalidBlockSelection { + fn default() -> Self { + Self([InvalidBlockHookType::Witness].into()) + } +} + +impl InvalidBlockSelection { + /// Creates a new _unique_ [`InvalidBlockSelection`] from the given items. + /// + /// # Note + /// + /// This will dedupe the selection and remove duplicates while preserving the order. 
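For the `--datadir.static-files` rename at the top of this hunk, clap's `alias` keeps the previous snake_case spelling parseable. A self-contained mirror of the pattern (assumes the `clap` crate with its `derive` feature enabled):

```rust
// The kebab-case name is canonical; the old snake_case spelling still
// parses as a hidden alias, so existing scripts keep working.
use clap::Parser;

#[derive(Parser, Debug)]
struct Cli {
    /// The absolute path to store static files in.
    #[arg(
        long = "datadir.static-files",
        alias = "datadir.static_files",
        value_name = "PATH"
    )]
    static_files_path: Option<std::path::PathBuf>,
}

fn main() {
    let new = Cli::parse_from(["app", "--datadir.static-files", "/tmp/sf"]);
    let old = Cli::parse_from(["app", "--datadir.static_files", "/tmp/sf"]);
    assert_eq!(new.static_files_path, old.static_files_path);
}
```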
+ /// + /// # Example + /// + /// Create a selection from the [`InvalidBlockHookType`] string identifiers + /// + /// ``` + /// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection}; + /// let selection = vec!["witness", "prestate", "opcode"]; + /// let config = InvalidBlockSelection::try_from_selection(selection).unwrap(); + /// assert_eq!( + /// config, + /// InvalidBlockSelection::from([ + /// InvalidBlockHookType::Witness, + /// InvalidBlockHookType::PreState, + /// InvalidBlockHookType::Opcode + /// ]) + /// ); + /// ``` + /// + /// Create a unique selection from the [`InvalidBlockHookType`] string identifiers + /// + /// ``` + /// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection}; + /// let selection = vec!["witness", "prestate", "opcode", "witness", "prestate"]; + /// let config = InvalidBlockSelection::try_from_selection(selection).unwrap(); + /// assert_eq!( + /// config, + /// InvalidBlockSelection::from([ + /// InvalidBlockHookType::Witness, + /// InvalidBlockHookType::PreState, + /// InvalidBlockHookType::Opcode + /// ]) + /// ); + /// ``` + pub fn try_from_selection(selection: I) -> Result + where + I: IntoIterator, + T: TryInto, + { + selection.into_iter().map(TryInto::try_into).collect() + } + + /// Clones the set of configured [`InvalidBlockHookType`]. + pub fn to_selection(&self) -> HashSet { + self.0.clone() + } +} + +impl From<&[InvalidBlockHookType]> for InvalidBlockSelection { + fn from(s: &[InvalidBlockHookType]) -> Self { + Self(s.iter().copied().collect()) + } +} + +impl From> for InvalidBlockSelection { + fn from(s: Vec) -> Self { + Self(s.into_iter().collect()) + } +} + +impl From<[InvalidBlockHookType; N]> for InvalidBlockSelection { + fn from(s: [InvalidBlockHookType; N]) -> Self { + Self(s.iter().copied().collect()) + } +} + +impl FromIterator for InvalidBlockSelection { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + Self(iter.into_iter().collect()) + } +} + +impl FromStr for InvalidBlockSelection { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + if s.is_empty() { + return Ok(Self(Default::default())) + } + let hooks = s.split(',').map(str::trim).peekable(); + Self::try_from_selection(hooks) + } +} + +impl fmt::Display for InvalidBlockSelection { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}]", self.0.iter().map(|s| s.to_string()).collect::>().join(", ")) + } +} + +/// clap value parser for [`InvalidBlockSelection`]. 
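The `FromStr` impl above splits on commas, trims each item, and collects into a `HashSet`, so repeated hooks collapse to a single entry; the clap value parser that follows mainly wraps this to produce friendlier CLI errors. A minimal standalone mirror:

```rust
// Split on commas, trim, parse each item, and dedupe by collecting into
// a `HashSet`; any unknown item fails the whole parse.
use std::{collections::HashSet, str::FromStr};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum Hook {
    Witness,
    PreState,
    Opcode,
}

impl FromStr for Hook {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "witness" => Self::Witness,
            "prestate" => Self::PreState,
            "opcode" => Self::Opcode,
            other => return Err(format!("unknown hook: {other}")),
        })
    }
}

#[derive(Debug, PartialEq, Eq)]
struct Selection(HashSet<Hook>);

impl FromStr for Selection {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Ok(Self(HashSet::new()));
        }
        s.split(',').map(str::trim).map(Hook::from_str).collect::<Result<_, _>>().map(Self)
    }
}

fn main() {
    let a: Selection = "witness,prestate,witness".parse().unwrap();
    let b: Selection = "prestate,witness".parse().unwrap();
    // Duplicates collapse; order of items is irrelevant.
    assert_eq!(a, b);
}
```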
+#[derive(Clone, Debug, Default)] +#[non_exhaustive] +struct InvalidBlockSelectionValueParser; + +impl TypedValueParser for InvalidBlockSelectionValueParser { + type Value = InvalidBlockSelection; + + fn parse_ref( + &self, + _cmd: &Command, + arg: Option<&Arg>, + value: &OsStr, + ) -> Result { + let val = + value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; + val.parse::().map_err(|err| { + let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned()); + let possible_values = InvalidBlockHookType::all_variant_names().to_vec().join(","); + let msg = format!( + "Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]" + ); + clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg) + }) + } + + fn possible_values(&self) -> Option + '_>> { + let values = InvalidBlockHookType::all_variant_names().iter().map(PossibleValue::new); + Some(Box::new(values)) + } +} + +/// The type of invalid block hook to install +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + Hash, + AsRefStr, + IntoStaticStr, + VariantNames, + VariantArray, + EnumIter, +)] +#[strum(serialize_all = "kebab-case")] +pub enum InvalidBlockHookType { + /// A witness value enum + Witness, + /// A prestate trace value enum + PreState, + /// An opcode trace value enum + Opcode, +} + +impl FromStr for InvalidBlockHookType { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + Ok(match s { + "witness" => Self::Witness, + "prestate" => Self::PreState, + "opcode" => Self::Opcode, + _ => return Err(ParseError::VariantNotFound), + }) + } +} + +impl TryFrom<&str> for InvalidBlockHookType { + type Error = ParseError; + fn try_from(s: &str) -> Result>::Error> { + FromStr::from_str(s) + } +} + +impl fmt::Display for InvalidBlockHookType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad(self.as_ref()) + } +} + +impl InvalidBlockHookType { + /// Returns all variant names of the enum + pub const fn all_variant_names() -> &'static [&'static str] { + ::VARIANTS + } } #[cfg(test)] @@ -78,9 +328,63 @@ mod tests { } #[test] - fn test_parse_database_args() { + fn test_parse_default_debug_args() { let default_args = DebugArgs::default(); let args = CommandParser::::parse_from(["reth"]).args; assert_eq!(args, default_args); } + + #[test] + fn test_parse_invalid_block_args() { + let expected_args = DebugArgs { + invalid_block_hook: Some(InvalidBlockSelection::from([InvalidBlockHookType::Witness])), + ..Default::default() + }; + let args = CommandParser::::parse_from([ + "reth", + "--debug.invalid-block-hook", + "witness", + ]) + .args; + assert_eq!(args, expected_args); + + let expected_args = DebugArgs { + invalid_block_hook: Some(InvalidBlockSelection::from([ + InvalidBlockHookType::Witness, + InvalidBlockHookType::PreState, + ])), + ..Default::default() + }; + let args = CommandParser::::parse_from([ + "reth", + "--debug.invalid-block-hook", + "witness,prestate", + ]) + .args; + assert_eq!(args, expected_args); + + let args = CommandParser::::parse_from([ + "reth", + "--debug.invalid-block-hook", + "witness,prestate,prestate", + ]) + .args; + assert_eq!(args, expected_args); + + let args = CommandParser::::parse_from([ + "reth", + "--debug.invalid-block-hook", + "witness,witness,prestate", + ]) + .args; + assert_eq!(args, expected_args); + + let args = CommandParser::::parse_from([ + "reth", + "--debug.invalid-block-hook", + "prestate,witness,prestate", + ]) + .args; + assert_eq!(args, expected_args); + } } diff --git 
a/crates/node/core/src/args/error.rs b/crates/node/core/src/args/error.rs new file mode 100644 index 0000000000..7119501ac9 --- /dev/null +++ b/crates/node/core/src/args/error.rs @@ -0,0 +1,22 @@ +use std::num::ParseIntError; + +/// Error while parsing a `ReceiptsLogPruneConfig` +#[derive(thiserror::Error, Debug)] +#[allow(clippy::enum_variant_names)] +pub(crate) enum ReceiptsLogError { + /// The format of the filter is invalid. + #[error("invalid filter format: {0}")] + InvalidFilterFormat(String), + /// Address is invalid. + #[error("address is invalid: {0}")] + InvalidAddress(String), + /// The prune mode is not one of full, distance, before. + #[error("prune mode is invalid: {0}")] + InvalidPruneMode(String), + /// The distance value supplied is invalid. + #[error("distance is invalid: {0}")] + InvalidDistance(ParseIntError), + /// The block number supplied is invalid. + #[error("block number is invalid: {0}")] + InvalidBlockNumber(ParseIntError), +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 206292e1bd..f5b3530188 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -14,7 +14,7 @@ pub use rpc_state_cache::RpcStateCacheArgs; /// DebugArgs struct for debugging purposes mod debug; -pub use debug::DebugArgs; +pub use debug::{DebugArgs, InvalidBlockHookType, InvalidBlockSelection}; /// DatabaseArgs struct for configuring the database mod database; @@ -62,4 +62,5 @@ pub use performance_optimization::PerformanceOptimizationArgs; pub mod utils; +mod error; pub mod types; diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index 53891ff0bd..b033be4ac3 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -15,12 +15,12 @@ use reth_discv5::{ discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; -use reth_net_nat::NatResolver; +use reth_net_nat::{NatResolver, DEFAULT_NET_IF_NAME}; use reth_network::{ transactions::{ constants::{ tx_fetcher::{ - DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, + DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }, tx_manager::{ @@ -35,6 +35,7 @@ use reth_network::{ }; use reth_network_peers::{mainnet_nodes, TrustedPeer}; use secp256k1::SecretKey; +use tracing::error; use crate::version::P2P_CLIENT_VERSION; @@ -144,9 +145,42 @@ pub struct NetworkArgs { /// Default is 128 KiB. #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, verbatim_doc_comment)] pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, + + /// Max capacity of cache of hashes for transactions pending fetch. + #[arg(long = "max-tx-pending-fetch", value_name = "COUNT", default_value_t = DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, verbatim_doc_comment)] + pub max_capacity_cache_txns_pending_fetch: u32, + + /// Name of network interface used to communicate with peers. + /// + /// If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + #[cfg(not(target_os = "windows"))] + #[arg(long = "net-if.experimental", conflicts_with = "addr", value_name = "IF_NAME")] + pub net_if: Option, } impl NetworkArgs { + /// Returns the resolved IP address. 
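`resolved_addr`, documented above and implemented in the next hunk, prefers the IP of a named interface and falls back to the configured discovery address when resolution fails. A sketch with the resolver stubbed by a closure (the real lookup is `reth_net_nat::net_if::resolve_net_if_ip`):

```rust
// Stubbed sketch of the interface-name fallback: an empty flag value means
// "flag set without a value", which tries the docker default interface.
use std::net::{IpAddr, Ipv4Addr};

const DEFAULT_NET_IF_NAME: &str = "eth0";
const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED);

fn resolved_addr(
    net_if: Option<&str>,
    configured: IpAddr,
    resolve: impl Fn(&str) -> Result<IpAddr, String>,
) -> IpAddr {
    if let Some(if_name) = net_if {
        let if_name = if if_name.is_empty() { DEFAULT_NET_IF_NAME } else { if_name };
        return match resolve(if_name) {
            Ok(addr) => addr,
            Err(err) => {
                // Log and fall back rather than aborting startup.
                eprintln!("failed to read network interface IP ({if_name}): {err}");
                DEFAULT_DISCOVERY_ADDR
            }
        };
    }
    configured
}

fn main() {
    let lo = IpAddr::V4(Ipv4Addr::LOCALHOST);
    let ok = resolved_addr(Some("lo"), lo, |_| Ok(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))));
    assert_eq!(ok, IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)));
    // No interface flag: the plain --addr value wins.
    assert_eq!(resolved_addr(None, lo, |_| Err("nope".into())), lo);
}
```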
+ pub fn resolved_addr(&self) -> IpAddr { + #[cfg(not(target_os = "windows"))] + if let Some(ref if_name) = self.net_if { + let if_name = if if_name.is_empty() { DEFAULT_NET_IF_NAME } else { if_name }; + return match reth_net_nat::net_if::resolve_net_if_ip(if_name) { + Ok(addr) => addr, + Err(err) => { + error!(target: "reth::cli", + if_name, + %err, + "Failed to read network interface IP" + ); + + DEFAULT_DISCOVERY_ADDR + } + } + } + + self.addr + } + /// Returns the resolved bootnodes if any are provided. pub fn resolved_bootnodes(&self) -> Option> { self.bootnodes.clone().map(|bootnodes| { @@ -172,6 +206,7 @@ impl NetworkArgs { secret_key: SecretKey, default_peers_file: PathBuf, ) -> NetworkConfigBuilder { + let addr = self.resolved_addr(); let chain_bootnodes = self .resolved_bootnodes() .unwrap_or_else(|| chain_spec.bootnodes().unwrap_or_else(mainnet_nodes)); @@ -191,6 +226,7 @@ impl NetworkArgs { self.max_concurrent_tx_requests_per_peer, self.soft_limit_byte_size_pooled_transactions_response, self.soft_limit_byte_size_pooled_transactions_response_on_pack_request, + self.max_capacity_cache_txns_pending_fetch, ), max_transactions_seen_by_peer_history: self.max_seen_tx_history, }; @@ -219,11 +255,11 @@ impl NetworkArgs { }) // apply discovery settings .apply(|builder| { - let rlpx_socket = (self.addr, self.port).into(); + let rlpx_socket = (addr, self.port).into(); self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes) }) .listener_addr(SocketAddr::new( - self.addr, // set discovery port based on instance number + addr, // set discovery port based on instance number self.port, )) .discovery_addr(SocketAddr::new( @@ -297,6 +333,8 @@ impl Default for NetworkArgs { soft_limit_byte_size_pooled_transactions_response_on_pack_request: DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, max_pending_pool_imports: DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS, max_seen_tx_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, + max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, + net_if: None, } } } diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index e8591faca5..204a54ea5b 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -1,52 +1,251 @@ //! Pruning and full node arguments +use crate::args::error::ReceiptsLogError; +use alloy_primitives::{Address, BlockNumber}; use clap::Args; use reth_chainspec::ChainSpec; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; +use std::collections::BTreeMap; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] #[command(next_help_heading = "Pruning")] pub struct PruningArgs { /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored. - /// This flag takes priority over pruning configuration in reth.toml. #[arg(long, default_value_t = false)] pub full: bool, + + /// Minimum pruning interval measured in blocks. + #[arg(long, default_value_t = 0)] + pub block_interval: u64, + + // Sender Recovery + /// Prunes all sender recovery data. + #[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] + pub sender_recovery_full: bool, + /// Prune sender recovery data before the `head-N` block number. In other words, keep last N + + /// 1 blocks. 
+ #[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] + pub sender_recovery_distance: Option, + /// Prune sender recovery data before the specified block number. The specified block number is + /// not pruned. + #[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] + pub sender_recovery_before: Option, + + // Transaction Lookup + /// Prunes all transaction lookup data. + #[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] + pub transaction_lookup_full: bool, + /// Prune transaction lookup data before the `head-N` block number. In other words, keep last N + /// + 1 blocks. + #[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] + pub transaction_lookup_distance: Option, + /// Prune transaction lookup data before the specified block number. The specified block number + /// is not pruned. + #[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] + pub transaction_lookup_before: Option, + + // Receipts + /// Prunes all receipt data. + #[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_distance", "receipts_before"])] + pub receipts_full: bool, + /// Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks. + #[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_before"])] + pub receipts_distance: Option, + /// Prune receipts before the specified block number. The specified block number is not pruned. + #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_distance"])] + pub receipts_before: Option, + + // Account History + /// Prunes all account history. + #[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] + pub account_history_full: bool, + /// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks. + #[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] + pub account_history_distance: Option, + /// Prune account history before the specified block number. The specified block number is not + /// pruned. + #[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] + pub account_history_before: Option, + + // Storage History + /// Prunes all storage history data. + #[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] + pub storage_history_full: bool, + /// Prune storage history before the `head-N` block number. In other words, keep last N + 1 + /// blocks. + #[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] + pub storage_history_distance: Option, + /// Prune storage history before the specified block number. The specified block number is not + /// pruned. 
+ #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] + pub storage_history_before: Option, + + // Receipts Log Filter + /// Configure receipts log filter. Format: + /// <`address`>:<`prune_mode`>[,<`address`>:<`prune_mode`>...] Where <`prune_mode`> can be + /// 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' + #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", value_delimiter = ',', value_parser = parse_receipts_log_filter)] + pub receipts_log_filter: Vec, } impl PruningArgs { /// Returns pruning configuration. pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { - if !self.full { - return None - } + // Initialise with a default prune configuration. + let mut config = PruneConfig::default(); - Some(PruneConfig { - block_interval: 5, - recent_sidecars_kept_blocks: 0, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: None, - // prune all receipts if chain doesn't have deposit contract specified in chain spec - receipts: chain_spec - .deposit_contract - .as_ref() - .map(|contract| PruneMode::Before(contract.block)) - .or(Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE))), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - receipts_log_filter: ReceiptsLogPruneConfig( - chain_spec + // If --full is set, use full node defaults. + if self.full { + config = PruneConfig { + block_interval: 5, + recent_sidecars_kept_blocks: 0, + segments: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + // prune all receipts if chain doesn't have deposit contract specified in chain + // spec + receipts: chain_spec .deposit_contract .as_ref() - .map(|contract| (contract.address, PruneMode::Before(contract.block))) - .into_iter() - .collect(), - ), - }, - }) + .map(|contract| PruneMode::Before(contract.block)) + .or(Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE))), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts_log_filter: ReceiptsLogPruneConfig( + chain_spec + .deposit_contract + .as_ref() + .map(|contract| (contract.address, PruneMode::Before(contract.block))) + .into_iter() + .collect(), + ), + }, + } + } + + // Override with any explicitly set prune.* flags. 
+ if let Some(mode) = self.sender_recovery_prune_mode() { + config.segments.sender_recovery = Some(mode); + } + if let Some(mode) = self.transaction_lookup_prune_mode() { + config.segments.transaction_lookup = Some(mode); + } + if let Some(mode) = self.receipts_prune_mode() { + config.segments.receipts = Some(mode); + } + if let Some(mode) = self.account_history_prune_mode() { + config.segments.account_history = Some(mode); + } + if let Some(mode) = self.storage_history_prune_mode() { + config.segments.storage_history = Some(mode); + } + + Some(config) + } + const fn sender_recovery_prune_mode(&self) -> Option { + if self.sender_recovery_full { + Some(PruneMode::Full) + } else if let Some(distance) = self.sender_recovery_distance { + Some(PruneMode::Distance(distance)) + } else if let Some(block_number) = self.sender_recovery_before { + Some(PruneMode::Before(block_number)) + } else { + None + } + } + + const fn transaction_lookup_prune_mode(&self) -> Option { + if self.transaction_lookup_full { + Some(PruneMode::Full) + } else if let Some(distance) = self.transaction_lookup_distance { + Some(PruneMode::Distance(distance)) + } else if let Some(block_number) = self.transaction_lookup_before { + Some(PruneMode::Before(block_number)) + } else { + None + } + } + + const fn receipts_prune_mode(&self) -> Option { + if self.receipts_full { + Some(PruneMode::Full) + } else if let Some(distance) = self.receipts_distance { + Some(PruneMode::Distance(distance)) + } else if let Some(block_number) = self.receipts_before { + Some(PruneMode::Before(block_number)) + } else { + None + } + } + + const fn account_history_prune_mode(&self) -> Option { + if self.account_history_full { + Some(PruneMode::Full) + } else if let Some(distance) = self.account_history_distance { + Some(PruneMode::Distance(distance)) + } else if let Some(block_number) = self.account_history_before { + Some(PruneMode::Before(block_number)) + } else { + None + } + } + + const fn storage_history_prune_mode(&self) -> Option { + if self.storage_history_full { + Some(PruneMode::Full) + } else if let Some(distance) = self.storage_history_distance { + Some(PruneMode::Distance(distance)) + } else if let Some(block_number) = self.storage_history_before { + Some(PruneMode::Before(block_number)) + } else { + None + } + } +} + +pub(crate) fn parse_receipts_log_filter( + value: &str, +) -> Result { + let mut config = BTreeMap::new(); + // Split out each of the filters. + let filters = value.split(','); + for filter in filters { + let parts: Vec<&str> = filter.split(':').collect(); + if parts.len() < 2 { + return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); + } + // Parse the address + let address = parts[0] + .parse::
<Address>()
+            .map_err(|_| ReceiptsLogError::InvalidAddress(parts[0].to_string()))?;
+
+        // Parse the prune mode
+        let prune_mode = match parts[1] {
+            "full" => PruneMode::Full,
+            s if s.starts_with("distance") => {
+                if parts.len() < 3 {
+                    return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string()));
+                }
+                let distance =
+                    parts[2].parse::<u64>().map_err(ReceiptsLogError::InvalidDistance)?;
+                PruneMode::Distance(distance)
+            }
+            s if s.starts_with("before") => {
+                if parts.len() < 3 {
+                    return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string()));
+                }
+                let block_number = parts[2]
+                    .parse::<BlockNumber>()
+                    .map_err(ReceiptsLogError::InvalidBlockNumber)?;
+                PruneMode::Before(block_number)
+            }
+            _ => return Err(ReceiptsLogError::InvalidPruneMode(parts[1].to_string())),
+        };
+        config.insert(address, prune_mode);
+    }
+    Ok(ReceiptsLogPruneConfig(config))
+}

 #[cfg(test)]
@@ -67,4 +266,62 @@ mod tests {
         let args = CommandParser::<PruningArgs>::parse_from(["reth"]).args;
         assert_eq!(args, default_args);
     }
+
+    #[test]
+    fn test_parse_receipts_log_filter() {
+        let filter1 = "0x0000000000000000000000000000000000000001:full";
+        let filter2 = "0x0000000000000000000000000000000000000002:distance:1000";
+        let filter3 = "0x0000000000000000000000000000000000000003:before:5000000";
+        let filters = [filter1, filter2, filter3].join(",");
+
+        // Args can be parsed.
+        let result = parse_receipts_log_filter(&filters);
+        assert!(result.is_ok());
+        let config = result.unwrap();
+        assert_eq!(config.0.len(), 3);
+
+        // Check that the args were parsed correctly.
+        let addr1: Address = "0x0000000000000000000000000000000000000001".parse().unwrap();
+        let addr2: Address = "0x0000000000000000000000000000000000000002".parse().unwrap();
+        let addr3: Address = "0x0000000000000000000000000000000000000003".parse().unwrap();
+
+        assert_eq!(config.0.get(&addr1), Some(&PruneMode::Full));
+        assert_eq!(config.0.get(&addr2), Some(&PruneMode::Distance(1000)));
+        assert_eq!(config.0.get(&addr3), Some(&PruneMode::Before(5000000)));
+    }
+
+    #[test]
+    fn test_parse_receipts_log_filter_invalid_filter_format() {
+        let result = parse_receipts_log_filter("invalid_format");
+        assert!(matches!(result, Err(ReceiptsLogError::InvalidFilterFormat(_))));
+    }
+
+    #[test]
+    fn test_parse_receipts_log_filter_invalid_address() {
+        let result = parse_receipts_log_filter("invalid_address:full");
+        assert!(matches!(result, Err(ReceiptsLogError::InvalidAddress(_))));
+    }
+
+    #[test]
+    fn test_parse_receipts_log_filter_invalid_prune_mode() {
+        let result =
+            parse_receipts_log_filter("0x0000000000000000000000000000000000000000:invalid_mode");
+        assert!(matches!(result, Err(ReceiptsLogError::InvalidPruneMode(_))));
+    }
+
+    #[test]
+    fn test_parse_receipts_log_filter_invalid_distance() {
+        let result = parse_receipts_log_filter(
+            "0x0000000000000000000000000000000000000000:distance:invalid_distance",
+        );
+        assert!(matches!(result, Err(ReceiptsLogError::InvalidDistance(_))));
+    }
+
+    #[test]
+    fn test_parse_receipts_log_filter_invalid_block_number() {
+        let result = parse_receipts_log_filter(
+            "0x0000000000000000000000000000000000000000:before:invalid_block",
+        );
+        assert!(matches!(result, Err(ReceiptsLogError::InvalidBlockNumber(_))));
+    }
 }
diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs
index 23af075216..15771e9897 100644
--- a/crates/node/core/src/args/rpc_server.rs
+++ b/crates/node/core/src/args/rpc_server.rs
@@ -156,6 +156,14 @@ pub struct RpcServerArgs {
     )]
     pub rpc_gas_cap: u64,
 
+    /// Maximum number of blocks for
`eth_simulateV1` call. + #[arg( + long = "rpc.max-simulate-blocks", + value_name = "BLOCKS_COUNT", + default_value_t = constants::DEFAULT_MAX_SIMULATE_BLOCKS + )] + pub rpc_max_simulate_blocks: u64, + /// The maximum proof window for historical proof generation. /// This value allows for generating historical proofs up to /// configured number of blocks from current tip (up to `tip - window`). @@ -300,6 +308,7 @@ impl Default for RpcServerArgs { rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS, rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index a27b1fa806..98e125ab9f 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,8 +1,9 @@ //! Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; +use alloy_primitives::Address; use clap::Args; -use reth_primitives::Address; +use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, @@ -45,6 +46,14 @@ pub struct TxPoolArgs { #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)] pub price_bump: u128, + /// Minimum base fee required by the protocol. + #[arg(long = "txpool.minimal-protocol-fee", default_value_t = MIN_PROTOCOL_BASE_FEE)] + pub minimal_protocol_basefee: u64, + + /// The default enforced gas limit for transactions entering the pool + #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT)] + pub gas_limit: u64, + /// Price bump percentage to replace an already existing blob transaction #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)] pub blob_transaction_price_bump: u128, @@ -90,6 +99,8 @@ impl Default for TxPoolArgs { queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bump: DEFAULT_PRICE_BUMP, + minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, @@ -133,6 +144,8 @@ impl RethTransactionPoolConfig for TxPoolArgs { default_price_bump: self.price_bump, replace_blob_tx_price_bump: self.blob_transaction_price_bump, }, + minimal_protocol_basefee: self.minimal_protocol_basefee, + gas_limit: self.gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, } diff --git a/crates/node/core/src/args/utils.rs b/crates/node/core/src/args/utils.rs index b96e997f09..bc1f76205e 100644 --- a/crates/node/core/src/args/utils.rs +++ b/crates/node/core/src/args/utils.rs @@ -8,6 +8,7 @@ use reth_bsc_chainspec::{BSC_CHAPEL, BSC_DEV, BSC_MAINNET, BSC_RIALTO}; use reth_chainspec::ChainSpec; #[cfg(all(not(feature = "optimism"), not(feature = "bsc")))] use reth_chainspec::{DEV, HOLESKY, MAINNET, SEPOLIA}; +use reth_cli::chainspec::ChainSpecParser; use reth_fs_util as fs; #[cfg(feature = "optimism")] use 
reth_optimism_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_DEV, OP_MAINNET, OP_SEPOLIA}; @@ -32,11 +33,6 @@ pub const SUPPORTED_CHAINS: &[&str] = &[ /// Chains supported by reth. First value should be used as the default. pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"]; -/// The help info for the --chain flag -pub fn chain_help() -> String { - format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", ")) -} - /// Clap value parser for [`ChainSpec`]s. /// /// The value parser matches either a known chain, the path @@ -110,6 +106,20 @@ pub fn parse_custom_chain_spec(s: &str) -> eyre::Result Ok(genesis.into()) } +/// Default chain specification parser. +#[derive(Debug, Clone, Default)] +pub struct DefaultChainSpecParser; + +impl ChainSpecParser for DefaultChainSpecParser { + type ChainSpec = ChainSpec; + + const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS; + + fn parse(s: &str) -> eyre::Result> { + chain_value_parser(s) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index f40d4287f4..73ada50fcd 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -1,7 +1,7 @@ //! Config traits for various node components. +use alloy_primitives::Bytes; use reth_network::protocol::IntoRlpxSubProtocol; -use reth_primitives::Bytes; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, time::Duration}; diff --git a/crates/node/core/src/dirs.rs b/crates/node/core/src/dirs.rs index 5922b6818c..c788f35da1 100644 --- a/crates/node/core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -343,6 +343,13 @@ impl ChainPath { pub fn jwt(&self) -> PathBuf { self.data_dir().join("jwt.hex") } + + /// Returns the path to the invalid block hooks directory for this chain. 
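`DefaultChainSpecParser` above is mostly trait plumbing for `reth-cli`. A stand-in mirror of the `ChainSpecParser` shape with a toy `parse` that accepts only built-in chain names (the real `chain_value_parser` also accepts a path to a chain specification file):

```rust
// Stand-in trait and types; only the shape matches the reth-cli trait.
use std::sync::Arc;

#[derive(Debug)]
struct ChainSpec {
    name: &'static str,
}

trait ChainSpecParser {
    type ChainSpec;
    const SUPPORTED_CHAINS: &'static [&'static str];
    fn parse(s: &str) -> Result<Arc<Self::ChainSpec>, String>;
}

#[derive(Debug, Clone, Default)]
struct DefaultChainSpecParser;

impl ChainSpecParser for DefaultChainSpecParser {
    type ChainSpec = ChainSpec;
    const SUPPORTED_CHAINS: &'static [&'static str] = &["mainnet", "sepolia", "holesky", "dev"];

    fn parse(s: &str) -> Result<Arc<ChainSpec>, String> {
        // A real parser would fall back to reading a spec file from `s` here.
        if Self::SUPPORTED_CHAINS.contains(&s) {
            Ok(Arc::new(ChainSpec { name: "built-in" }))
        } else {
            Err(format!("unknown chain: {s}"))
        }
    }
}

fn main() {
    let spec = DefaultChainSpecParser::parse("mainnet").unwrap();
    println!("parsed: {:?}", spec.name);
    assert!(DefaultChainSpecParser::parse("unknown-chain").is_err());
}
```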
+ /// + /// `//invalid_block_hooks` + pub fn invalid_block_hooks(&self) -> PathBuf { + self.data_dir().join("invalid_block_hooks") + } } impl AsRef for ChainPath { diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 249fea6cc1..12c3d3dabb 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -11,16 +11,16 @@ use crate::{ use eyre::eyre; use reth_chainspec::{ChainSpec, MAINNET}; use reth_config::config::PruneConfig; -use reth_db_api::database::Database; use reth_network_p2p::headers::client::HeadersClient; use serde::{de::DeserializeOwned, Serialize}; use std::{fs, path::Path}; -use reth_primitives::{ - revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256, -}; -use reth_provider::{BlockHashReader, HeaderProvider, ProviderFactory, StageCheckpointReader}; +use alloy_primitives::{BlockNumber, B256}; +use reth_primitives::{BlockHashOrNumber, Head, SealedHeader}; use reth_stages_types::StageId; +use reth_storage_api::{ + BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, +}; use reth_storage_errors::provider::ProviderResult; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; @@ -235,7 +235,7 @@ impl NodeConfig { } /// Set the pruning args for the node - pub const fn with_pruning(mut self, pruning: PruningArgs) -> Self { + pub fn with_pruning(mut self, pruning: PruningArgs) -> Self { self.pruning = pruning; self } @@ -267,16 +267,16 @@ impl NodeConfig { Ok(max_block) } - /// Loads '`EnvKzgSettings::Default`' - pub const fn kzg_settings(&self) -> eyre::Result { - Ok(EnvKzgSettings::Default) - } - /// Fetches the head block from the database. /// /// If the database is empty, returns the genesis block. - pub fn lookup_head(&self, factory: ProviderFactory) -> ProviderResult { - let provider = factory.provider()?; + pub fn lookup_head(&self, factory: &Factory) -> ProviderResult + where + Factory: DatabaseProviderFactory< + Provider: HeaderProvider + StageCheckpointReader + BlockHashReader, + >, + { + let provider = factory.database_provider_ro()?; let head = provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number; diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index 78dbcfbcf5..683c305642 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -1,5 +1,5 @@ //! Version information for reth. 
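
The `lookup_head` rework above replaces the concrete `ProviderFactory<DB>` parameter with trait bounds; any factory whose read-only database provider can serve headers, stage checkpoints, and block hashes now qualifies. A hedged sketch of a caller (the `log_head` helper is illustrative, not part of this diff):

```rust
use reth_node_core::node_config::NodeConfig;
use reth_storage_api::{
    BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader,
};
use reth_storage_errors::provider::ProviderResult;

/// Illustrative caller: works for any conforming provider factory.
fn log_head<F>(config: &NodeConfig, factory: &F) -> ProviderResult<()>
where
    F: DatabaseProviderFactory<
        Provider: HeaderProvider + StageCheckpointReader + BlockHashReader,
    >,
{
    // Resolves the head from the Finish stage checkpoint (genesis if empty).
    let head = config.lookup_head(factory)?;
    tracing::info!(number = head.number, hash = ?head.hash, "resolved head");
    Ok(())
}
```
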
-use reth_db_api::models::ClientVersion; +use reth_db::ClientVersion; use reth_rpc_types::engine::ClientCode; /// The client code for Reth diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index a4970677ac..9c56c2da9b 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true workspace = true [dependencies] +# reth reth-provider.workspace = true reth-beacon-consensus.workspace = true reth-network = { workspace = true, features = ["serde"] } @@ -21,7 +22,8 @@ reth-static-file.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true -# alloy +# ethereum +alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true # async @@ -32,6 +34,6 @@ futures.workspace = true tracing.workspace = true -#misc +# misc pin-project.workspace = true humantime.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 1583c619e7..9c554478ae 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -1,6 +1,7 @@ //! Support for handling events emitted by node components. use crate::cl::ConsensusLayerHealthEvent; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; use reth_beacon_consensus::{ @@ -8,7 +9,7 @@ use reth_beacon_consensus::{ }; use reth_network::NetworkEvent; use reth_network_api::PeersInfo; -use reth_primitives::{constants, BlockNumber, B256}; +use reth_primitives::constants; use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; @@ -275,8 +276,8 @@ impl NodeState { info!(number=head.number, hash=?head.hash(), ?elapsed, "Canonical chain committed"); } - BeaconConsensusEngineEvent::ForkBlockAdded(block) => { - info!(number=block.number, hash=?block.hash(), "Block added to fork chain"); + BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed) => { + info!(number=block.number, hash=?block.hash(), ?elapsed, "Block added to fork chain"); } } } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 74333cf4fb..153036d34f 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -28,16 +28,16 @@ tracing.workspace = true eyre.workspace = true [target.'cfg(unix)'.dependencies] -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } +tikv-jemalloc-ctl = { workspace = true, optional = true, features = ["stats"] } [target.'cfg(target_os = "linux")'.dependencies] procfs = "0.16.0" [dev-dependencies] -reth-db = { workspace = true, features = ["test-utils"] } reqwest.workspace = true reth-chainspec.workspace = true socket2 = { version = "0.4", default-features = false } +reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true @@ -45,6 +45,5 @@ workspace = true [features] jemalloc = ["dep:tikv-jemalloc-ctl"] - [build-dependencies] vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/node/metrics/src/chain.rs b/crates/node/metrics/src/chain.rs new file mode 100644 index 0000000000..191aae9923 --- /dev/null +++ b/crates/node/metrics/src/chain.rs @@ -0,0 +1,19 @@ +//! This exposes reth's chain information over prometheus. +use metrics::{describe_gauge, gauge}; + +/// Contains chain information for the application. +#[derive(Debug, Clone)] +pub struct ChainSpecInfo { + /// The name of the chain. 
+ pub name: String, +} + +impl ChainSpecInfo { + /// This exposes reth's chain information over prometheus. + pub fn register_chain_spec_metrics(&self) { + let labels: [(&str, String); 1] = [("name", self.name.clone())]; + + describe_gauge!("chain_spec", "Information about the chain"); + let _gauge = gauge!("chain_spec", &labels); + } +} diff --git a/crates/node/metrics/src/lib.rs b/crates/node/metrics/src/lib.rs index 4abc39a32d..d74a8aeffb 100644 --- a/crates/node/metrics/src/lib.rs +++ b/crates/node/metrics/src/lib.rs @@ -7,6 +7,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +pub mod chain; /// The metrics hooks for prometheus. pub mod hooks; pub mod recorder; diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 5eadaaab52..06bb490a43 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -1,4 +1,5 @@ use crate::{ + chain::ChainSpecInfo, hooks::{Hook, Hooks}, recorder::install_prometheus_recorder, version::VersionInfo, @@ -17,6 +18,7 @@ use tracing::info; pub struct MetricServerConfig { listen_addr: SocketAddr, version_info: VersionInfo, + chain_spec_info: ChainSpecInfo, task_executor: TaskExecutor, hooks: Hooks, } @@ -26,10 +28,11 @@ impl MetricServerConfig { pub const fn new( listen_addr: SocketAddr, version_info: VersionInfo, + chain_spec_info: ChainSpecInfo, task_executor: TaskExecutor, hooks: Hooks, ) -> Self { - Self { listen_addr, hooks, task_executor, version_info } + Self { listen_addr, hooks, task_executor, version_info, chain_spec_info } } } @@ -47,7 +50,8 @@ impl MetricServer { /// Spawns the metrics server pub async fn serve(&self) -> eyre::Result<()> { - let MetricServerConfig { listen_addr, hooks, task_executor, version_info } = &self.config; + let MetricServerConfig { listen_addr, hooks, task_executor, version_info, chain_spec_info } = + &self.config; info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint"); @@ -68,6 +72,7 @@ impl MetricServer { describe_io_stats(); version_info.register_version_metrics(); + chain_spec_info.register_chain_spec_metrics(); Ok(()) } @@ -204,27 +209,11 @@ const fn describe_io_stats() {} mod tests { use super::*; use reqwest::Client; - use reth_chainspec::MAINNET; - use reth_db::{ - test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, - DatabaseEnv, - }; - use reth_provider::{ - providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory, - }; + use reth_provider::{test_utils::create_test_provider_factory, StaticFileProviderFactory}; use reth_tasks::TaskManager; use socket2::{Domain, Socket, Type}; use std::net::{SocketAddr, TcpListener}; - fn create_test_db() -> ProviderFactory>> { - let (_, static_dir_path) = create_test_static_files_dir(); - ProviderFactory::new( - create_test_rw_db(), - MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ) - } - fn get_random_available_addr() -> SocketAddr { let addr = &"127.0.0.1:0".parse::().unwrap().into(); let socket = Socket::new(Domain::IPV4, Type::STREAM, None).unwrap(); @@ -237,6 +226,7 @@ mod tests { #[tokio::test] async fn test_metrics_endpoint() { + let chain_spec_info = ChainSpecInfo { name: "test".to_string() }; let version_info = VersionInfo { version: "test", build_timestamp: "test", @@ -249,11 +239,12 @@ mod tests { let tasks = TaskManager::current(); let executor = tasks.executor(); - let factory = create_test_db(); + let factory = create_test_provider_factory(); let 
hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); let listen_addr = get_random_available_addr(); - let config = MetricServerConfig::new(listen_addr, version_info, executor, hooks); + let config = + MetricServerConfig::new(listen_addr, version_info, chain_spec_info, executor, hooks); MetricServer::new(config).serve().await.unwrap(); diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml new file mode 100644 index 0000000000..f04925d9cd --- /dev/null +++ b/crates/node/types/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "reth-node-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-db-api.workspace = true +reth-engine-primitives.workspace = true \ No newline at end of file diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs new file mode 100644 index 0000000000..2ad2f8abd8 --- /dev/null +++ b/crates/node/types/src/lib.rs @@ -0,0 +1,173 @@ +//! Standalone crate for Reth configuration traits and builder types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use std::marker::PhantomData; + +use reth_chainspec::EthChainSpec; +use reth_db_api::{ + database_metrics::{DatabaseMetadata, DatabaseMetrics}, + Database, +}; +use reth_engine_primitives::EngineTypes; + +/// Configures all the primitive types of the node. +// TODO(mattsse): this is currently a placeholder +pub trait NodePrimitives {} + +// TODO(mattsse): Placeholder +impl NodePrimitives for () {} + +/// The type that configures the essential types of an Ethereum-like node. +/// +/// This includes the primitive types of a node and chain specification. +/// +/// This trait is intended to be stateless and only define the types of the node. +pub trait NodeTypes: Send + Sync + Unpin + 'static { + /// The node's primitive types, defining basic operations and structures. + type Primitives: NodePrimitives; + /// The type used for configuration of the EVM. + type ChainSpec: EthChainSpec; +} + +/// The type that configures an Ethereum-like node with an engine for consensus. +pub trait NodeTypesWithEngine: NodeTypes { + /// The node's engine types, defining the interaction with the consensus engine. + type Engine: EngineTypes; +} + +/// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds database to the +/// node. +/// +/// Its types are configured by node internally and are not intended to be user configurable. +pub trait NodeTypesWithDB: NodeTypes { + /// Underlying database type used by the node to store and retrieve data. + type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; +} + +/// An adapter type combining [`NodeTypes`] and db into [`NodeTypesWithDB`]. +#[derive(Debug)] +pub struct NodeTypesWithDBAdapter<Types, DB> { + types: PhantomData<Types>, + db: PhantomData<DB>, +} + +impl<Types, DB> NodeTypesWithDBAdapter<Types, DB> { + /// Create a new adapter with the configured types. + pub fn new() -> Self { + Self { types: Default::default(), db: Default::default() } + } +} + +impl<Types, DB> Default for NodeTypesWithDBAdapter<Types, DB> { + fn default() -> Self { + Self::new() + } +} + +impl<Types, DB> Clone for NodeTypesWithDBAdapter<Types, DB> { + fn clone(&self) -> Self { + Self { types: self.types, db: self.db } + } +} + +impl<Types, DB> NodeTypes for NodeTypesWithDBAdapter<Types, DB> +where + Types: NodeTypes, + DB: Send + Sync + Unpin + 'static, +{ + type Primitives = Types::Primitives; + type ChainSpec = Types::ChainSpec; +} + +impl<Types, DB> NodeTypesWithEngine for NodeTypesWithDBAdapter<Types, DB> +where + Types: NodeTypesWithEngine, + DB: Send + Sync + Unpin + 'static, +{ + type Engine = Types::Engine; +} + +impl<Types, DB> NodeTypesWithDB for NodeTypesWithDBAdapter<Types, DB> +where + Types: NodeTypes, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, +{ + type DB = DB; +} + +/// A [`NodeTypes`] type builder. +#[derive(Default, Debug)] +pub struct AnyNodeTypes<P = (), C = ()>(PhantomData<P>, PhantomData<C>); + +impl<P, C> AnyNodeTypes<P, C> { + /// Sets the `Primitives` associated type. + pub const fn primitives<T>(self) -> AnyNodeTypes<T, C> { + AnyNodeTypes::<T, C>(PhantomData::<T>, PhantomData::<C>) + } + + /// Sets the `ChainSpec` associated type. + pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T> { + AnyNodeTypes::<P, T>(PhantomData::<P>, PhantomData::<T>) + } +} + +impl<P, C> NodeTypes for AnyNodeTypes<P, C> +where + P: NodePrimitives + Send + Sync + Unpin + 'static, + C: EthChainSpec, +{ + type Primitives = P; + type ChainSpec = C; +} + +/// A [`NodeTypesWithEngine`] type builder. +#[derive(Default, Debug)] +pub struct AnyNodeTypesWithEngine<P = (), E = (), C = ()> { + /// Embedding the basic node types. + base: AnyNodeTypes<P, C>, + /// Phantom data for the engine. + _engine: PhantomData<E>, +} + +impl<P, E, C> AnyNodeTypesWithEngine<P, E, C> { + /// Sets the `Primitives` associated type. + pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C> { + AnyNodeTypesWithEngine { base: self.base.primitives::<T>(), _engine: PhantomData } + } + + /// Sets the `Engine` associated type. + pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C> { + AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData::<T> } + } + + /// Sets the `ChainSpec` associated type. + pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T> { + AnyNodeTypesWithEngine { base: self.base.chain_spec::<T>(), _engine: PhantomData } + } +} + +impl<P, E, C> NodeTypes for AnyNodeTypesWithEngine<P, E, C> +where + P: NodePrimitives + Send + Sync + Unpin + 'static, + E: EngineTypes + Send + Sync + Unpin, + C: EthChainSpec, +{ + type Primitives = P; + type ChainSpec = C; +} + +impl<P, E, C> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C> +where + P: NodePrimitives + Send + Sync + Unpin + 'static, + E: EngineTypes + Send + Sync + Unpin, + C: EthChainSpec, +{ + type Engine = E; +} diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml new file mode 100644 index 0000000000..0722167f40 --- /dev/null +++ b/crates/optimism/bin/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "op-reth" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +reth-node-builder.workspace = true +reth-cli-util.workspace = true +reth-optimism-cli.workspace = true +reth-provider.workspace = true +reth-optimism-rpc.workspace = true +reth-node-optimism.workspace = true + +clap = { workspace = true, features = ["derive", "env"] } + +[lints] +workspace = true + +[features] +default = ["jemalloc"] + +jemalloc = ["reth-cli-util/jemalloc"] +jemalloc-prof = ["reth-cli-util/jemalloc-prof"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] + +asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-node-optimism/asm-keccak"] + +optimism = ["reth-optimism-cli/optimism", "reth-node-optimism/optimism"] + +opbnb = [ + "reth-node-optimism/opbnb" +] + +[[bin]] +name = "op-reth" +path = "src/main.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs new file mode 100644 index 0000000000..f5a88798a0 --- /dev/null +++ b/crates/optimism/bin/src/main.rs @@ -0,0 +1,83 @@ +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![allow(missing_docs, rustdoc::missing_crate_level_docs)] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] + +use clap::Parser; +use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; +use reth_node_optimism::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; +use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; +use reth_optimism_rpc::SequencerClient; +use reth_provider::providers::BlockchainProvider2; + +#[global_allocator] +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); + +fn main() { + reth_cli_util::sigsegv_handler::install(); + + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + std::env::set_var("RUST_BACKTRACE", "1"); + } + + if let Err(err) = + Cli::<OpChainSpecParser, RollupArgs>::parse().run(|builder, rollup_args| async move { + let enable_engine2 = rollup_args.experimental; + let sequencer_http_arg = rollup_args.sequencer_http.clone(); + match enable_engine2 { + true => { + let engine_tree_config = TreeConfig::default() + .with_persistence_threshold(rollup_args.persistence_threshold) + .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); + let handle = builder + .with_types_and_provider::<OptimismNode, BlockchainProvider2<_>>() + .with_components(OptimismNode::components(rollup_args)) + .with_add_ons::<OptimismAddOns>() + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = sequencer_http_arg { + ctx.registry + .eth_api() + .set_sequencer_client(SequencerClient::new(sequencer_http))?; + } + + Ok(()) + }) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + engine_tree_config, + ); + builder.launch_with(launcher) + }) + .await?; + + handle.node_exit_future.await + } + false => { + let handle = builder + .node(OptimismNode::new(rollup_args.clone())) + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = sequencer_http_arg { + ctx.registry + .eth_api() + .set_sequencer_client(SequencerClient::new(sequencer_http))?; + } + + Ok(()) + }) + .launch() + .await?; + + handle.node_exit_future.await + } + } + }) + { + eprintln!("Error: {err:?}"); + std::process::exit(1); + } +} diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index d14a6b3d69..6b13b27081 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -1,14 +1,11 @@ //! Chain specification for the Base Mainnet network. -#[cfg(not(feature = "std"))] use alloc::sync::Arc; -#[cfg(feature = "std")] -use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use crate::OpChainSpec; @@ -20,7 +17,7 @@ pub static BASE_MAINNET: Lazy<Arc<OpChainSpec>> = Lazy::new(|| { chain: Chain::base_mainnet(), genesis: serde_json::from_str(include_str!("../res/genesis/base.json")) .expect("Can't deserialize Base genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index 916e47cf90..932cacc439 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -1,14 +1,11 @@ //! Chain specification for the Base Sepolia testnet network.
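
To make the `reth-node-types` builders above concrete: the `Any*` types are meant to be used in type position to assemble a `NodeTypesWithEngine` without writing a new struct. A sketch, assuming `EthEngineTypes` from `reth-node-ethereum` (any `EngineTypes` implementor works) and the unit `NodePrimitives` placeholder:

```rust
use reth_chainspec::ChainSpec;
use reth_node_ethereum::EthEngineTypes; // assumed engine-types implementor
use reth_node_types::{AnyNodeTypesWithEngine, NodeTypesWithEngine};

/// Assembled node types: unit primitives, Ethereum engine types, L1 chain spec.
type MyNodeTypes = AnyNodeTypesWithEngine<(), EthEngineTypes, ChainSpec>;

// Compile-time proof that the assembly satisfies the trait.
fn assert_impl<T: NodeTypesWithEngine>() {}

fn main() {
    assert_impl::<MyNodeTypes>();
}
```
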
-#[cfg(not(feature = "std"))] use alloc::sync::Arc; -#[cfg(feature = "std")] -use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use crate::OpChainSpec; @@ -20,7 +17,7 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { chain: Chain::base_sepolia(), genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_base.json")) .expect("Can't deserialize Base Sepolia genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/dev.rs b/crates/optimism/chainspec/src/dev.rs index 3351ee03bb..96ec0e42ec 100644 --- a/crates/optimism/chainspec/src/dev.rs +++ b/crates/optimism/chainspec/src/dev.rs @@ -1,14 +1,11 @@ //! Chain specification in dev mode for custom chain. -#[cfg(not(feature = "std"))] use alloc::sync::Arc; -#[cfg(feature = "std")] -use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::U256; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::DEV_HARDFORKS; use reth_primitives_traits::constants::DEV_GENESIS_HASH; @@ -19,20 +16,18 @@ use crate::OpChainSpec; /// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test /// test test test test test test test junk". pub static OP_DEV: Lazy> = Lazy::new(|| { - { - OpChainSpec { - inner: ChainSpec { - chain: Chain::dev(), - genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) - .expect("Can't deserialize Dev testnet genesis json"), - genesis_hash: Some(DEV_GENESIS_HASH), - paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: DEV_HARDFORKS.clone(), - base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), - deposit_contract: None, // TODO: do we even have? - ..Default::default() - }, - } + OpChainSpec { + inner: ChainSpec { + chain: Chain::dev(), + genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) + .expect("Can't deserialize Dev testnet genesis json"), + genesis_hash: once_cell_set(DEV_GENESIS_HASH), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks: DEV_HARDFORKS.clone(), + base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), + deposit_contract: None, // TODO: do we even have? + ..Default::default() + }, } .into() }); diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 8954f8ab0e..151d5a67ea 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -7,7 +7,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#[cfg(not(feature = "std"))] extern crate alloc; pub mod constants; @@ -34,7 +33,7 @@ use derive_more::{Constructor, Deref, Into}; use reth_chainspec::ChainSpec; /// OP stack chain spec type. -#[derive(Debug, Deref, Into, Constructor)] +#[derive(Debug, Clone, Deref, Into, Constructor)] pub struct OpChainSpec { /// [`ChainSpec`]. 
pub inner: ChainSpec, diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 15a952ae65..cbd0a0d0b5 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -1,14 +1,11 @@ //! Chain specification for the Optimism Mainnet network. -#[cfg(not(feature = "std"))] use alloc::sync::Arc; -#[cfg(feature = "std")] -use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; @@ -23,7 +20,7 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { // manually from trusted source genesis: serde_json::from_str(include_str!("../res/genesis/optimism.json")) .expect("Can't deserialize Optimism Mainnet genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index 9f223f7e0d..3d2165f448 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -1,14 +1,11 @@ //! Chain specification for the Optimism Sepolia testnet network. -#[cfg(not(feature = "std"))] use alloc::sync::Arc; -#[cfg(feature = "std")] -use std::sync::Arc; use alloy_chains::{Chain, NamedChain}; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; @@ -21,7 +18,7 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { chain: Chain::from_named(NamedChain::OptimismSepolia), genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_op.json")) .expect("Can't deserialize OP Sepolia genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/opbnb.rs b/crates/optimism/chainspec/src/opbnb.rs index a316492e77..aaa72dd82b 100644 --- a/crates/optimism/chainspec/src/opbnb.rs +++ b/crates/optimism/chainspec/src/opbnb.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use crate::OpChainSpec; @@ -20,7 +20,7 @@ pub static OPBNB_MAINNET: Lazy> = Lazy::new(|| { chain: Chain::opbnb_mainnet(), genesis: serde_json::from_str(include_str!("../res/genesis/opbnb_mainnet.json")) .expect("Can't deserialize opBNB mainent genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "4dd61178c8b0f01670c231597e7bcb368e84545acd46d940a896d6a791dd6df4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/opbnb_qa.rs 
b/crates/optimism/chainspec/src/opbnb_qa.rs index 7e038581cb..43e25bd9d9 100644 --- a/crates/optimism/chainspec/src/opbnb_qa.rs +++ b/crates/optimism/chainspec/src/opbnb_qa.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use crate::OpChainSpec; @@ -20,7 +20,7 @@ pub static OPBNB_QA: Lazy> = Lazy::new(|| { chain: Chain::from_id(3534), genesis: serde_json::from_str(include_str!("../res/genesis/opbnb_qa.json")) .expect("Can't deserialize opBNB qa genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "1c2ad01526f22793643de4978dbf5cec5aeaedcb628470de8b950f8a46539ddf" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/chainspec/src/opbnb_testnet.rs b/crates/optimism/chainspec/src/opbnb_testnet.rs index c1f9df30e9..b3882ecc81 100644 --- a/crates/optimism/chainspec/src/opbnb_testnet.rs +++ b/crates/optimism/chainspec/src/opbnb_testnet.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use once_cell::sync::Lazy; -use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::{EthereumHardfork, OptimismHardfork}; use crate::OpChainSpec; @@ -20,7 +20,7 @@ pub static OPBNB_TESTNET: Lazy> = Lazy::new(|| { chain: Chain::opbnb_testnet(), genesis: serde_json::from_str(include_str!("../res/genesis/opbnb_testnet.json")) .expect("Can't deserialize opBNB testnet genesis json"), - genesis_hash: Some(b256!( + genesis_hash: once_cell_set(b256!( "51fa57729dfb1c27542c21b06cb72a0459c57440ceb43a465dae1307cd04fe80" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 9f4be1586b..edaa6efcfa 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -16,6 +16,7 @@ reth-cli-commands.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true +reth-db-common.workspace = true reth-downloaders.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -23,9 +24,10 @@ reth-stages.workspace = true reth-static-file.workspace = true reth-execution-types.workspace = true reth-node-core.workspace = true +reth-node-optimism.workspace = true reth-primitives.workspace = true -## optimisim +## optimism reth-optimism-primitives.workspace = true reth-optimism-chainspec.workspace = true @@ -37,12 +39,14 @@ reth-errors.workspace = true reth-config.workspace = true reth-evm-optimism.workspace = true reth-cli.workspace = true +reth-cli-runner.workspace = true +reth-node-builder.workspace = true +reth-tracing.workspace = true # eth alloy-primitives.workspace = true alloy-rlp.workspace = true - # misc futures-util.workspace = true clap = { workspace = true, features = ["derive", "env"] } @@ -63,9 +67,17 @@ tempfile.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true -[features] - optimism = [ +[features] +optimism = [ "reth-primitives/optimism", "reth-evm-optimism/optimism", "reth-provider/optimism", - ] \ No newline at end of file + "reth-node-core/optimism", + 
"reth-node-optimism/optimism", +] +asm-keccak = [ + "alloy-primitives/asm-keccak", + "reth-node-core/asm-keccak", + "reth-node-optimism/asm-keccak", + "reth-primitives/asm-keccak", +] diff --git a/crates/optimism/cli/src/chainspec.rs b/crates/optimism/cli/src/chainspec.rs index 86c95dea72..93eb2c7ad1 100644 --- a/crates/optimism/cli/src/chainspec.rs +++ b/crates/optimism/cli/src/chainspec.rs @@ -1,6 +1,6 @@ -use std::{ffi::OsStr, sync::Arc}; +use std::sync::Arc; -use clap::{builder::TypedValueParser, error::Result, Arg, Command}; +use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_node_core::args::utils::parse_custom_chain_spec; use reth_optimism_chainspec::{ @@ -30,7 +30,9 @@ fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { #[derive(Debug, Clone, Default)] pub struct OpChainSpecParser; -impl ChainSpecParser for OpChainSpecParser { +impl ChainSpecParser for OpChainSpecParser { + type ChainSpec = ChainSpec; + const SUPPORTED_CHAINS: &'static [&'static str] = &[ "dev", "optimism", @@ -47,39 +49,8 @@ impl ChainSpecParser for OpChainSpecParser { "opbnb-qa", ]; - fn parse(s: &str) -> eyre::Result> { - chain_value_parser(s) - } -} - -impl TypedValueParser for OpChainSpecParser { - type Value = Arc; - - fn parse_ref( - &self, - _cmd: &Command, - arg: Option<&Arg>, - value: &OsStr, - ) -> Result { - let val = - value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - >::parse(val).map_err(|err| { - let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned()); - let possible_values = Self::SUPPORTED_CHAINS.join(", "); - clap::Error::raw( - clap::error::ErrorKind::InvalidValue, - format!( - "Invalid value '{val}' for {arg}: {err}. [possible values: {possible_values}]" - ), - ) - }) - } - - fn possible_values( - &self, - ) -> Option + '_>> { - let values = Self::SUPPORTED_CHAINS.iter().map(clap::builder::PossibleValue::new); - Some(Box::new(values)) + fn parse(s: &str) -> eyre::Result> { + chain_value_parser(s).map(|s| Arc::new(Arc::unwrap_or_clone(s).inner)) } } @@ -90,7 +61,7 @@ mod tests { #[test] fn parse_known_chain_spec() { for &chain in OpChainSpecParser::SUPPORTED_CHAINS { - assert!(>::parse(chain).is_ok()); + assert!(::parse(chain).is_ok()); } } } diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index 1babb6b1e1..f24e039825 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -1,8 +1,8 @@ use alloy_primitives::B256; use futures_util::{Stream, StreamExt}; +use reth_chainspec::ChainSpec; use reth_config::Config; use reth_consensus::Consensus; -use reth_db_api::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -13,6 +13,7 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; +use reth_node_builder::NodeTypesWithDB; use reth_node_events::node::NodeEvent; use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; use reth_prune::PruneModes; @@ -26,17 +27,17 @@ use tokio::sync::watch; /// /// If configured to execute, all stages will run. Otherwise, only stages that don't require state /// will run. 
-pub(crate) async fn build_import_pipeline( +pub(crate) async fn build_import_pipeline( config: &Config, - provider_factory: ProviderFactory, + provider_factory: ProviderFactory, consensus: &Arc, file_client: Arc, - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer>, disable_exec: bool, skip_state_root_validation: bool, -) -> eyre::Result<(Pipeline, impl Stream)> +) -> eyre::Result<(Pipeline, impl Stream)> where - DB: Database + Clone + Unpin + 'static, + N: NodeTypesWithDB, C: Consensus + 'static, { if !file_client.has_canonical_blocks() { @@ -71,7 +72,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let pipeline = Pipeline::builder() + let pipeline = Pipeline::::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index 66d4122d98..ed38234dca 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -1,6 +1,8 @@ //! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a //! file. use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; @@ -8,8 +10,9 @@ use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; +use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; -use reth_optimism_primitives::bedrock_import::is_dup_tx; +use reth_optimism_primitives::bedrock::is_dup_tx; use reth_provider::StageCheckpointReader; use reth_prune::PruneModes; use reth_stages::StageId; @@ -21,9 +24,9 @@ use crate::commands::build_pipeline::build_import_pipeline; /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] -pub struct ImportOpCommand { +pub struct ImportOpCommand { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, /// Chunk byte length to read from file. #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] @@ -37,9 +40,11 @@ pub struct ImportOpCommand { path: PathBuf, } -impl ImportOpCommand { +impl> ImportOpCommand { /// Execute `import` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); info!(target: "reth::cli", @@ -51,7 +56,7 @@ impl ImportOpCommand { "Chunking chain import" ); - let Environment { provider_factory, config, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, config, .. 
} = self.env.init::(AccessRights::RW)?; // we use noop here because we expect the inputs to be valid let consensus = Arc::new(NoopConsensus::default()); diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index d216e0f575..58a8590bd8 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -4,16 +4,18 @@ use std::path::{Path, PathBuf}; use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_db::tables; -use reth_db_api::database::Database; use reth_downloaders::{ file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, receipt_file_client::ReceiptFileClient, }; use reth_execution_types::ExecutionOutcome; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::version::SHORT_VERSION; -use reth_optimism_primitives::bedrock_import::is_dup_tx; +use reth_optimism_primitives::bedrock::is_dup_tx; use reth_primitives::Receipts; use reth_provider::{ writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, @@ -23,13 +25,13 @@ use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; use tracing::{debug, error, info, trace}; -use crate::file_codec_ovm_receipt::HackReceiptFileCodec; +use crate::receipt_file_codec::HackReceiptFileCodec; /// Initializes the database with the genesis block. #[derive(Debug, Parser)] -pub struct ImportReceiptsOpCommand { +pub struct ImportReceiptsOpCommand { #[command(flatten)] - env: EnvironmentArgs, + env: EnvironmentArgs, /// Chunk byte length to read from file. #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] @@ -43,9 +45,11 @@ pub struct ImportReceiptsOpCommand { path: PathBuf, } -impl ImportReceiptsOpCommand { +impl> ImportReceiptsOpCommand { /// Execute `import` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); debug!(target: "reth::cli", @@ -53,7 +57,7 @@ impl ImportReceiptsOpCommand { "Chunking receipts import" ); - let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?; + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; import_receipts_from_file( provider_factory, @@ -76,14 +80,14 @@ impl ImportReceiptsOpCommand { } /// Imports receipts to static files from file in chunks. See [`import_receipts_from_reader`]. -pub async fn import_receipts_from_file( - provider_factory: ProviderFactory, +pub async fn import_receipts_from_file( + provider_factory: ProviderFactory, path: P, chunk_len: Option, filter: F, ) -> eyre::Result<()> where - DB: Database, + N: NodeTypesWithDB, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { @@ -168,13 +172,13 @@ where /// Caution! Filter callback must replace completely filtered out receipts for a block, with empty /// vectors, rather than `vec!(None)`. This is since the code for writing to static files, expects /// indices in the [`Receipts`] list, to map to sequential block numbers. 
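
The filter contract spelled out above is easier to read as code: the callback receives a block number plus the mutable `Receipts` for the chunk, must leave empty `Vec`s (never `vec![None]`) for fully filtered blocks, and returns the number of receipts it removed. A hedged sketch using the `is_dup_tx` helper this diff relocates; it assumes the `receipt_vec` layout of `Receipts` in `reth_primitives` and that the `u64` argument is the first block of the chunk:

```rust
use reth_optimism_primitives::bedrock::is_dup_tx;
use reth_primitives::Receipts;

/// Illustrative filter: drop receipts of known duplicated pre-Bedrock txs.
fn dup_tx_filter(first_block: u64, receipts: &mut Receipts) -> usize {
    let mut removed = 0;
    for (i, block_receipts) in receipts.receipt_vec.iter_mut().enumerate() {
        if is_dup_tx(first_block + i as u64) {
            removed += block_receipts.len();
            // Leave an empty vector, not `vec![None]`, so static-file indices
            // keep mapping to sequential block numbers.
            block_receipts.clear();
        }
    }
    removed
}
```
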
-pub async fn import_receipts_from_reader( - provider_factory: &ProviderFactory, +pub async fn import_receipts_from_reader( + provider_factory: &ProviderFactory, mut reader: ChunkedFileReader, mut filter: F, ) -> eyre::Result where - DB: Database, + N: NodeTypesWithDB, F: FnMut(u64, &mut Receipts) -> usize, { let mut total_decoded_receipts = 0; @@ -248,8 +252,8 @@ pub struct ImportReceiptsResult { #[cfg(test)] mod test { + use alloy_primitives::hex; use reth_db_common::init::init_genesis; - use reth_primitives::hex; use reth_stages::test_utils::TestStageDB; use tempfile::tempfile; use tokio::{ @@ -257,7 +261,7 @@ mod test { io::{AsyncSeekExt, AsyncWriteExt, SeekFrom}, }; - use crate::file_codec_ovm_receipt::test::{ + use crate::receipt_file_codec::test::{ HACK_RECEIPT_ENCODED_BLOCK_1, HACK_RECEIPT_ENCODED_BLOCK_2, HACK_RECEIPT_ENCODED_BLOCK_3, }; @@ -281,7 +285,7 @@ mod test { ChunkedFileReader::from_file(f, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE).await.unwrap(); let db = TestStageDB::default(); - init_genesis(db.factory.clone()).unwrap(); + init_genesis(&db.factory).unwrap(); // todo: where does import command init receipts ? probably somewhere in pipeline diff --git a/crates/optimism/cli/src/commands/init_state/bedrock.rs b/crates/optimism/cli/src/commands/init_state/bedrock.rs new file mode 100644 index 0000000000..1c77569f91 --- /dev/null +++ b/crates/optimism/cli/src/commands/init_state/bedrock.rs @@ -0,0 +1,136 @@ +use alloy_primitives::B256; +use reth_db::Database; +use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use reth_primitives::{ + BlockBody, BlockNumber, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, U256, +}; +use reth_provider::{ + providers::StaticFileProvider, BlockWriter, DatabaseProviderRW, StageCheckpointWriter, + StaticFileWriter, +}; +use reth_stages::{StageCheckpoint, StageId}; +use tracing::info; + +/// Creates a dummy chain (with no transactions) up to the last OVM block and appends the +/// first valid Bedrock block. +pub(crate) fn setup_op_mainnet_without_ovm( + provider_rw: &DatabaseProviderRW, + static_file_provider: &StaticFileProvider, +) -> Result<(), eyre::Error> { + info!(target: "reth::cli", "Setting up dummy OVM chain before importing state."); + + // Write OVM dummy data up to `BEDROCK_HEADER - 1` block + append_dummy_chain(static_file_provider, BEDROCK_HEADER.number - 1)?; + + info!(target: "reth::cli", "Appending Bedrock block."); + + append_bedrock_block(provider_rw, static_file_provider)?; + + for stage in StageId::ALL { + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(BEDROCK_HEADER.number))?; + } + + info!(target: "reth::cli", "Set up finished."); + + Ok(()) +} + +/// Appends the first bedrock block. +/// +/// By appending it, static file writer also verifies that all segments are at the same +/// height. +fn append_bedrock_block( + provider_rw: &DatabaseProviderRW, + sf_provider: &StaticFileProvider, +) -> Result<(), eyre::Error> { + provider_rw.insert_block( + SealedBlockWithSenders::new( + SealedBlock::new( + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + BlockBody::default(), + ), + vec![], + ) + .expect("no senders or txes"), + )?; + + sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( + &BEDROCK_HEADER, + BEDROCK_HEADER_TTD, + &BEDROCK_HEADER_HASH, + )?; + + sf_provider + .latest_writer(StaticFileSegment::Receipts)? 
+ .increment_block(BEDROCK_HEADER.number)?; + + sf_provider + .latest_writer(StaticFileSegment::Transactions)? + .increment_block(BEDROCK_HEADER.number)?; + + Ok(()) +} + +/// Creates a dummy chain with no transactions/receipts up to the `target_height` block, inclusive. +/// +/// * Headers: It will push an empty block. +/// * Transactions: It will not push any tx, only increment the end block range. +/// * Receipts: It will not push any receipt, only increment the end block range. +fn append_dummy_chain( + sf_provider: &StaticFileProvider, + target_height: BlockNumber, +) -> Result<(), eyre::Error> { + let (tx, rx) = std::sync::mpsc::channel(); + + // Spawn jobs for incrementing the block end range of transactions and receipts + for segment in [StaticFileSegment::Transactions, StaticFileSegment::Receipts] { + let tx_clone = tx.clone(); + let provider = sf_provider.clone(); + std::thread::spawn(move || { + let result = provider.latest_writer(segment).and_then(|mut writer| { + for block_num in 1..=target_height { + writer.increment_block(block_num)?; + } + Ok(()) + }); + + tx_clone.send(result).unwrap(); + }); + } + + // Spawn job for appending empty headers + let provider = sf_provider.clone(); + std::thread::spawn(move || { + let mut empty_header = Header::default(); + let result = provider.latest_writer(StaticFileSegment::Headers).and_then(|mut writer| { + for block_num in 1..=target_height { + // TODO: should we fill with real parent_hash? + empty_header.number = block_num; + writer.append_header(&empty_header, U256::ZERO, &B256::ZERO)?; + } + Ok(()) + }); + + tx.send(result).unwrap(); + }); + + // Catches any StaticFileWriter error. + while let Ok(r) = rx.recv() { + r?; + } + + // If, for any reason, a writer thread crashes, this verifies that all segments are at the + // same target_height. + for segment in + [StaticFileSegment::Headers, StaticFileSegment::Receipts, StaticFileSegment::Transactions] + { + assert_eq!( + sf_provider.latest_writer(segment)?.user_header().block_end(), + Some(target_height), + "Static file segment {segment} was unsuccessful advancing its block height." + ); + } + + Ok(()) +} diff --git a/crates/optimism/cli/src/commands/init_state/mod.rs b/crates/optimism/cli/src/commands/init_state/mod.rs new file mode 100644 index 0000000000..e62ec3d16c --- /dev/null +++ b/crates/optimism/cli/src/commands/init_state/mod.rs @@ -0,0 +1,80 @@ +//! Command that initializes the node from a genesis file. + +use clap::Parser; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, Environment}; +use reth_db_common::init::init_from_state_dump; +use reth_node_builder::NodeTypesWithEngine; +use reth_optimism_primitives::bedrock::BEDROCK_HEADER; +use reth_provider::{ + BlockNumReader, ChainSpecProvider, StaticFileProviderFactory, StaticFileWriter, +}; +use std::{fs::File, io::BufReader}; +use tracing::info; + +mod bedrock; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommandOp<C: ChainSpecParser> { + #[command(flatten)] + init_state: reth_cli_commands::init_state::InitStateCommand<C>, + + /// **Optimism Mainnet Only** + /// + /// Specifies whether to initialize the state without relying on OVM historical data. + /// + /// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM + /// block (#105235062) (14GB / 90 seconds). It then appends the Bedrock block. + /// + /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be + /// ignored. + #[arg(long, default_value = "false")] + without_ovm: bool, +} + +impl<C: ChainSpecParser<ChainSpec = ChainSpec>> InitStateCommandOp<C> { + /// Execute the `init` command + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting"); + + let Environment { config, provider_factory, .. } = + self.init_state.env.init::<N>(AccessRights::RW)?; + + let static_file_provider = provider_factory.static_file_provider(); + let provider_rw = provider_factory.provider_rw()?; + + // OP-Mainnet may want to bootstrap a chain without OVM historical data + if provider_factory.chain_spec().is_optimism_mainnet() && self.without_ovm { + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + bedrock::setup_op_mainnet_without_ovm(&provider_rw, &static_file_provider)?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-ovm." + )) + } + } + + info!(target: "reth::cli", "Initiating state dump"); + + let reader = BufReader::new(File::open(self.init_state.state)?); + let hash = init_from_state_dump(reader, &provider_rw.0, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index 4c63bb6237..41a19e3ded 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -1,8 +1,11 @@ +use crate::chainspec::OpChainSpecParser; use clap::Subcommand; use import::ImportOpCommand; use import_receipts::ImportReceiptsOpCommand; +use reth_chainspec::ChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - config_cmd, db, dump_genesis, init_cmd, init_state, + config_cmd, db, dump_genesis, init_cmd, node::{self, NoArgs}, p2p, prune, recover, stage, }; @@ -12,43 +15,47 @@ use std::fmt; mod build_pipeline; pub mod import; pub mod import_receipts; +pub mod init_state; /// Commands to be executed #[derive(Debug, Subcommand)] -pub enum Commands<Ext: clap::Args + fmt::Debug = NoArgs> { +pub enum Commands< + Spec: ChainSpecParser = OpChainSpecParser, + Ext: clap::Args + fmt::Debug = NoArgs, +> { /// Start the node #[command(name = "node")] - Node(node::NodeCommand<Ext>), + Node(Box<node::NodeCommand<Spec, Ext>>), /// Initialize the database from a genesis file. #[command(name = "init")] - Init(init_cmd::InitCommand), + Init(init_cmd::InitCommand<Spec>), /// Initialize the database from a state dump file. #[command(name = "init-state")] - InitState(init_state::InitStateCommand), + InitState(init_state::InitStateCommandOp<Spec>), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. #[command(name = "import-op")] - ImportOp(ImportOpCommand), + ImportOp(ImportOpCommand<Spec>), /// This imports RLP encoded receipts from a file. #[command(name = "import-receipts-op")] - ImportReceiptsOp(ImportReceiptsOpCommand), + ImportReceiptsOp(ImportReceiptsOpCommand<Spec>), /// Dumps genesis block JSON configuration to stdout.
- DumpGenesis(dump_genesis::DumpGenesisCommand), + DumpGenesis(dump_genesis::DumpGenesisCommand<Spec>), /// Database debugging utilities #[command(name = "db")] - Db(db::Command), + Db(db::Command<Spec>), /// Manipulate individual stages. #[command(name = "stage")] - Stage(stage::Command), + Stage(Box<stage::Command<Spec>>), /// P2P Debugging utilities #[command(name = "p2p")] - P2P(p2p::Command), + P2P(p2p::Command<Spec>), /// Write config to stdout #[command(name = "config")] Config(config_cmd::Command), /// Scripts for node recovery #[command(name = "recover")] - Recover(recover::Command), + Recover(recover::Command<Spec>), /// Prune according to the configuration without any limits #[command(name = "prune")] - Prune(prune::PruneCommand), + Prune(prune::PruneCommand<Spec>), } diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 6260a8e904..e2d06e00a7 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -18,14 +18,14 @@ pub mod commands; /// /// Enables decoding and encoding `HackReceipt` type. See . /// -/// Currently configured to use codec [`HackReceipt`](file_codec_ovm_receipt::HackReceipt) based on +/// Currently configured to use codec [`HackReceipt`](receipt_file_codec::HackReceipt) based on /// export of below Bedrock data using . Codec can /// be replaced with regular encoding of receipts for export. /// /// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit /// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `HackReceipt` type (originally /// made for op-erigon's import needs). -pub mod file_codec_ovm_receipt; +pub mod receipt_file_codec; pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; @@ -34,23 +34,34 @@ use std::{ffi::OsString, fmt, sync::Arc}; use chainspec::OpChainSpecParser; use clap::{command, value_parser, Parser}; use commands::Commands; +use futures_util::Future; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::node::NoArgs; +use reth_cli_runner::CliRunner; +use reth_db::DatabaseEnv; +use reth_evm_optimism::OpExecutorProvider; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ - args::{utils::chain_help, LogArgs}, + args::LogArgs, version::{LONG_VERSION, SHORT_VERSION}, }; +use reth_node_optimism::OptimismNode; +use reth_tracing::FileWorkerGuard; +use tracing::info; -/// The main reth cli interface. +/// The main op-reth cli interface. /// /// This is the entrypoint to the executable. #[derive(Debug, Parser)] #[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] -pub struct Cli<Ext: clap::Args + fmt::Debug = NoArgs> { +pub struct Cli< + Spec: ChainSpecParser = OpChainSpecParser, + Ext: clap::Args + fmt::Debug = NoArgs, +> { /// The command to run #[command(subcommand)] - command: Commands<Ext>, + command: Commands<Spec, Ext>, /// The chain this node is running. /// @@ -58,12 +69,12 @@ pub struct Cli<Ext: clap::Args + fmt::Debug = NoArgs> { #[arg( long, value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = OpChainSpecParser::SUPPORTED_CHAINS[0], - value_parser = OpChainSpecParser::default(), + long_help = Spec::help_message(), + default_value = Spec::SUPPORTED_CHAINS[0], + value_parser = Spec::parser(), global = true, )] - chain: Arc<ChainSpec>, + chain: Arc<Spec::ChainSpec>, /// Add a new instance of a node. /// @@ -100,3 +111,67 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { Self::try_parse_from(itr) } } + +impl<Spec, Ext> Cli<Spec, Ext> +where + Spec: ChainSpecParser<ChainSpec = ChainSpec>, + Ext: clap::Args + fmt::Debug, +{ + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). + pub fn run<L, Fut>(mut self, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>>>, Ext) -> Fut, + Fut: Future<Output = eyre::Result<()>>, + { + // add network name to logs dir + self.logs.log_file_directory = + self.logs.log_file_directory.join(self.chain.chain.to_string()); + + let _guard = self.init_tracing()?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + + let runner = CliRunner::default(); + match self.command { + Commands::Node(command) => { + runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) + } + Commands::Init(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<OptimismNode>()) + } + Commands::InitState(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<OptimismNode>()) + } + Commands::ImportOp(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<OptimismNode>()) + } + Commands::ImportReceiptsOp(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<OptimismNode>()) + } + Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<OptimismNode>()) + } + Commands::Stage(command) => runner.run_command_until_exit(|ctx| { + command.execute::<OptimismNode, _, _>(ctx, OpExecutorProvider::optimism) + }), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Recover(command) => { + runner.run_command_until_exit(|ctx| command.execute::<OptimismNode>(ctx)) + } + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<OptimismNode>()), + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function returns a guard that must be kept alive to ensure + /// that all logs are flushed to disk. + pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> { + let guard = self.logs.init_tracing()?; + Ok(guard) + } +} diff --git a/crates/optimism/cli/src/file_codec_ovm_receipt.rs b/crates/optimism/cli/src/receipt_file_codec.rs similarity index 99% rename from crates/optimism/cli/src/file_codec_ovm_receipt.rs rename to crates/optimism/cli/src/receipt_file_codec.rs index b2643b8400..c86bfa12a6 100644 --- a/crates/optimism/cli/src/file_codec_ovm_receipt.rs +++ b/crates/optimism/cli/src/receipt_file_codec.rs @@ -1,9 +1,10 @@ //! Codec for reading raw receipts from a file.
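
For downstream binaries, the `Cli::run` entrypoint above reduces to a few lines when no RPC extensions or experimental engine are needed; a condensed sketch mirroring the non-experimental branch of op-reth's `main`:

```rust
use clap::Parser;
use reth_node_optimism::{args::RollupArgs, OptimismNode};
use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli};

fn main() {
    if let Err(err) =
        Cli::<OpChainSpecParser, RollupArgs>::parse().run(|builder, rollup_args| async move {
            // Launch a stock Optimism node with the parsed rollup args.
            let handle = builder.node(OptimismNode::new(rollup_args)).launch().await?;
            handle.node_exit_future.await
        })
    {
        eprintln!("Error: {err:?}");
        std::process::exit(1);
    }
}
```
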
+use alloy_primitives::B256; use alloy_rlp::{Decodable, RlpDecodable}; use reth_primitives::{ bytes::{Buf, BytesMut}, - Address, Bloom, Bytes, Log, Receipt, TxType, B256, + Address, Bloom, Bytes, Log, Receipt, TxType, }; use tokio_util::codec::Decoder; @@ -94,7 +95,8 @@ impl TryFrom for ReceiptWithBlockNumber { #[cfg(test)] pub(crate) mod test { - use reth_primitives::{alloy_primitives::LogData, hex}; + use alloy_primitives::hex; + use reth_primitives::alloy_primitives::LogData; use super::*; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index bd538a167f..82a9e953ce 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -13,12 +13,22 @@ workspace = true [dependencies] # reth -reth-consensus-common.workspace = true reth-chainspec.workspace = true -reth-primitives.workspace = true +reth-consensus-common.workspace = true reth-consensus.workspace = true +reth-primitives.workspace = true +reth-trie-common.workspace = true + +# ethereum +alloy-primitives.workspace = true tracing.workspace = true +[dev-dependencies] +alloy-primitives.workspace = true +reth-optimism-chainspec.workspace = true + [features] optimism = ["reth-primitives/optimism"] + +opbnb = ["reth-primitives/opbnb"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index a2a930f96d..5f382b3912 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,19 +9,21 @@ // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +use alloy_primitives::U256; use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, - validate_block_pre_execution, validate_header_base_fee, validate_header_extradata, - validate_header_gas, + validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, + validate_header_base_fee, validate_header_extradata, validate_header_gas, + validate_shanghai_withdrawals, }; use reth_primitives::{ - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, }; use std::{sync::Arc, time::SystemTime}; +mod proof; mod validation; pub use validation::validate_block_post_execution; @@ -127,7 +129,29 @@ impl Consensus for OptimismBeaconConsensus { } fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validate_block_pre_execution(block, &self.chain_spec) + // Check ommers hash + let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.ommers); + if block.header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + )) + } + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + // EIP-4895: Beacon chain push withdrawals as operations + if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + validate_shanghai_withdrawals(block)?; + } + + if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + validate_cancun_gas(block)?; + } 
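+        // At this point the ommers hash, the transaction root, and, where the
+        // respective hardforks are active, the Shanghai withdrawals and Cancun
+        // blob gas fields have all been validated against the block body.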
+ + Ok(()) } fn validate_block_post_execution( diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs new file mode 100644 index 0000000000..ad3f5f971b --- /dev/null +++ b/crates/optimism/consensus/src/proof.rs @@ -0,0 +1,322 @@ +//! Helper function for Receipt root calculation for Optimism hardforks. + +use alloy_primitives::B256; +use reth_chainspec::{ChainSpec, OptimismHardfork}; +use reth_primitives::ReceiptWithBloom; +use reth_trie_common::root::ordered_trie_root_with_encoder; + +/// Calculates the receipt root for a header. +pub(crate) fn calculate_receipt_root_optimism( + receipts: &[ReceiptWithBloom], + chain_spec: &ChainSpec, + timestamp: u64, +) -> B256 { + // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, + // the receipt root calculation does not include the deposit nonce in the receipt + // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the + // receipts before calculating the receipt root. This was corrected in the Canyon + // hardfork. + if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + { + let receipts = receipts + .iter() + .cloned() + .map(|mut r| { + r.receipt.deposit_nonce = None; + r + }) + .collect::>(); + + return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| { + r.encode_inner(buf, false) + }) + } + + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{b256, bloom, hex, Address, Bloom, Bytes, Log, LogData}; + use reth_optimism_chainspec::BASE_SEPOLIA; + use reth_primitives::{Receipt, ReceiptWithBloom, TxType}; + + /// Tests that the receipt root is computed correctly for the regolith block. + /// This was implemented due to a minor bug in op-geth and op-erigon where in + /// the Regolith hardfork, the receipt root calculation does not include the + /// deposit nonce in the receipt encoding. + /// To fix this an op-reth patch was applied to the receipt root calculation + /// to strip the deposit nonce from each receipt before calculating the root. + #[test] + fn check_optimism_receipt_root() { + let cases = [ + // Deposit nonces didn't exist in Bedrock; No need to strip. For the purposes of this + // test, we do have them, so we should get the same root as Canyon. + ( + "bedrock", + 1679079599, + b256!("e255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"), + ), + // Deposit nonces introduced in Regolith. They weren't included in the receipt RLP, + // so we need to strip them - the receipt root will differ. + ( + "regolith", + 1679079600, + b256!("e255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"), + ), + // Receipt root hashing bug fixed in Canyon. Back to including the deposit nonce + // in the receipt RLP when computing the receipt root. 
+ ( + "canyon", + 1699981200, + b256!("6eefbb5efb95235476654a8bfbf8cb64a4f5f0b0c80b700b0c5964550beee6d7"), + ), + ]; + + for case in cases { + let receipts = vec![ + // 0xb0d6ee650637911394396d81172bd1c637d568ed1fbddab0daddfca399c58b53 + ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Deposit, + success: true, + cumulative_gas_used: 46913, + logs: vec![], + deposit_nonce: Some(4012991u64), + deposit_receipt_version: None, + }, + bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), + }, + // 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a + ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 118083, + logs: vec![ + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked( + vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")) + ) + }, + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked( + vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")) + ) + }, + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked( + vec![ + b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), + ], Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) + }, + ], + deposit_nonce: None, + deposit_receipt_version: None, + }, + bloom: Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), + }, + // 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266 + ReceiptWithBloom { + 
receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 189253, + logs: vec![ + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked(vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001"))) + }, + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked(vec![ + b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001"))) + }, + Log { + address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), + data: LogData::new_unchecked(vec![ + b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), + ], + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) + }, + ], + deposit_nonce: None, + deposit_receipt_version: None, + }, + bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), + }, + // 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f + ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 346969, + logs: vec![ + Log { + address: hex!("4200000000000000000000000000000000000006").into(), + data: LogData::new_unchecked( vec![ + b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), + b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), + ], + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000"))) + }, + Log { + address: hex!("cf8e7e6b26f407dee615fc4db18bf829e7aa8c09").into(), + data: LogData::new_unchecked( vec![ + b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), + b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), + ], + Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) + }, + Log { + address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), + data: 
LogData::new_unchecked( vec![ + b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), + ], + Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf"))) + }, + Log { + address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), + data: LogData::new_unchecked( vec![ + b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), + b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), + b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), + ], + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) + }, + Log { + address: hex!("6d0f8d488b669aa9ba2d0f0b7b75a88bf5051cd3").into(), + data: LogData::new_unchecked( vec![ + b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), + b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093"))) + }, + Log { + address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), + data: LogData::new_unchecked( vec![ + b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1"))) + }, + Log { + address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), + data: LogData::new_unchecked( vec![ + b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), + b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), + b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), + ], + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000"))) + }, + ], + deposit_nonce: None, + deposit_receipt_version: None, + }, + bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), + }, + // 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351 + ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 623249, + logs: vec![ + Log { + address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), + data: LogData::new_unchecked( vec![ + b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + b256!("0000000000000000000000000000000000000000000000000000000000000000"), + b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), + 
b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), + ], + Default::default()) + }, + Log { + address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), + data: LogData::new_unchecked( vec![ + b256!("9d89e36eadf856db0ad9ffb5a569e07f95634dddd9501141ecf04820484ad0dc"), + b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), + b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), + ], + Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) + }, + Log { + address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), + data: LogData::new_unchecked( vec![ + b256!("110d160a1bedeea919a88fbc4b2a9fb61b7e664084391b6ca2740db66fef80fe"), + b256!("00000000000000000000000084d47f6eea8f8d87910448325519d1bb45c2972a"), + b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), + b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), + ], + Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) + }, + ], + deposit_nonce: None, + deposit_receipt_version: None, + }, + bloom: Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), + }, + ]; + let root = calculate_receipt_root_optimism(&receipts, BASE_SEPOLIA.as_ref(), case.1); + assert_eq!(root, case.2); + } + } + + #[test] + fn check_receipt_root_optimism() { + let logs = vec![Log { + address: Address::ZERO, + data: LogData::new_unchecked(vec![], Default::default()), + }]; + let bloom = 
bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip2930, + success: true, + cumulative_gas_used: 102068, + logs, + deposit_nonce: None, + deposit_receipt_version: None, + }, + bloom, + }; + let receipt = vec![receipt]; + let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0); + assert_eq!(root, b256!("fe70ae4a136d98944951b2123859698d59ad251a381abc9960fa81cae3d0d4a0")); + } +} diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index d7bb7681c5..3a76ec1385 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,9 +1,8 @@ +use crate::proof::calculate_receipt_root_optimism; +use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, - GotExpected, Receipt, B256, -}; +use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; /// Validate a block with regard to execution results: /// diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 4436126bf2..98fa146375 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -21,8 +21,12 @@ reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true +# ethereum +alloy-primitives.workspace = true + # Optimism reth-optimism-consensus.workspace = true +reth-optimism-chainspec.workspace = true # revm revm.workspace = true diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index ac15ad78f9..e604e58b06 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,9 +1,11 @@ use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_ethereum_forks::{EthereumHardfork, Head}; -/// Returns the spec id at the given timestamp. +/// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. /// -/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// # Note +/// +/// This is only intended to be used after the Bedrock, when hardforks are activated by /// timestamp. pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &ChainSpec, @@ -30,7 +32,7 @@ pub fn revm_spec_by_timestamp_after_bedrock( } } -/// return `revm_spec` from spec configuration. +/// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). 
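+///
+/// Call-site sketch (hypothetical values; assumes a `chain_spec` in scope):
+///
+/// ```ignore
+/// use reth_ethereum_forks::Head;
+///
+/// let head = Head { number: 1, timestamp: 1_700_000_000, ..Default::default() };
+/// let spec_id = revm_spec(&chain_spec, &head);
+/// ```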
pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 57958b5ec5..c5c6a0a4a3 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -19,7 +19,7 @@ pub enum OptimismBlockExecutionError { BlobTransactionRejected, /// Thrown when a database account could not be loaded. #[error("failed to load account {0}")] - AccountLoadFailed(reth_primitives::Address), + AccountLoadFailed(alloy_primitives::Address), } impl From for BlockExecutionError { diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 0ea21cc84b..bda2dd2e15 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,8 +1,9 @@ //! Optimism block executor. -use std::{collections::HashMap, str::FromStr, sync::Arc}; - -use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use crate::{ + l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, +}; +use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardfork}; use reth_evm::{ execute::{ @@ -14,23 +15,31 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_optimism_consensus::validate_block_post_execution; -use reth_primitives::{ - Address, BlockNumber, BlockWithSenders, Header, Receipt, Receipts, TxType, U256, -}; +use reth_primitives::{Address, BlockWithSenders, Header, Receipt, Receipts, TxType}; use reth_prune_types::PruneModes; use reth_revm::{ - batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, - state_change::post_block_balance_increments, Evm, State, + batch::BlockBatchRecord, + db::{ + states::{bundle_state::BundleRetention, StorageSlot}, + BundleAccount, + }, + state_change::post_block_balance_increments, + Evm, State, }; -use revm::db::states::StorageSlot; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, EvmState, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ResultAndState, +}; +use std::{ + collections::{hash_map::Entry, HashMap}, + fmt::Display, + str::FromStr, + sync::Arc, }; use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, trace}; -/// Provides executors to execute regular ethereum blocks +/// Provides executors to execute regular optimism blocks #[derive(Debug, Clone)] pub struct OpExecutorProvider { chain_spec: Arc, @@ -40,7 +49,10 @@ pub struct OpExecutorProvider { impl OpExecutorProvider { /// Creates a new default optimism executor provider. pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec, Default::default()) + Self::new( + chain_spec.clone(), + OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: (*chain_spec).clone() })), + ) } } @@ -53,7 +65,7 @@ impl OpExecutorProvider { impl OpExecutorProvider where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>
, { fn op_executor( &self, @@ -61,7 +73,7 @@ where prefetch_tx: Option>, ) -> OpBlockExecutor where - DB: Database + std::fmt::Display>, + DB: Database + Display>, { if let Some(tx) = prefetch_tx { OpBlockExecutor::new_with_prefetch_tx( @@ -90,12 +102,12 @@ where impl BlockExecutorProvider for OpExecutorProvider where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>
, { - type Executor + std::fmt::Display>> = + type Executor + Display>> = OpBlockExecutor; - type BatchExecutor + std::fmt::Display>> = + type BatchExecutor + Display>> = OpBatchExecutor; fn executor( &self, @@ -103,14 +115,14 @@ where prefetch_tx: Option>, ) -> Self::Executor where - DB: Database + std::fmt::Display>, + DB: Database + Display>, { self.op_executor(db, prefetch_tx) } fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database + std::fmt::Display>, + DB: Database + Display>, { let executor = self.op_executor(db, None); OpBatchExecutor { executor, batch_record: BlockBatchRecord::default() } @@ -119,7 +131,7 @@ where /// Helper container type for EVM with chain spec. #[derive(Debug, Clone)] -struct OpEvmExecutor { +pub struct OpEvmExecutor { /// The chainspec chain_spec: Arc, /// How to create an EVM. @@ -128,7 +140,7 @@ struct OpEvmExecutor { impl OpEvmExecutor where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>
, { /// Executes the transactions in the block and returns the receipts. /// @@ -144,7 +156,7 @@ where tx: Option>, ) -> Result<(Vec, u64), BlockExecutionError> where - DB: Database + std::fmt::Display>, + DB: Database + Display>, { // apply pre execution changes apply_beacon_root_contract_call( @@ -206,13 +218,7 @@ where // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { - let new_err = match err { - EVMError::Transaction(e) => EVMError::Transaction(e), - EVMError::Header(e) => EVMError::Header(e), - EVMError::Database(e) => EVMError::Database(e.into()), - EVMError::Custom(e) => EVMError::Custom(e), - EVMError::Precompile(e) => EVMError::Precompile(e), - }; + let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), @@ -261,7 +267,7 @@ where } } -/// A basic Ethereum block executor. +/// A basic Optimism block executor. /// /// Expected usage: /// - Create a new instance of the executor. @@ -292,22 +298,22 @@ impl OpBlockExecutor { Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, prefetch_tx: Some(tx) } } + /// Returns the chain spec. #[inline] - fn chain_spec(&self) -> &ChainSpec { + pub fn chain_spec(&self) -> &ChainSpec { &self.executor.chain_spec } /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { + pub fn state_mut(&mut self) -> &mut State { &mut self.state } } impl OpBlockExecutor where - EvmConfig: ConfigureEvm, - DB: Database + std::fmt::Display>, + EvmConfig: ConfigureEvm
<Header = Header>
, + DB: Database + Display>, { /// Configures a new evm configuration and block environment for the given block. /// @@ -318,7 +324,6 @@ where self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, - self.chain_spec(), header, total_difficulty, ); @@ -424,8 +429,8 @@ where impl Executor for OpBlockExecutor where - EvmConfig: ConfigureEvm, - DB: Database + std::fmt::Display>, + EvmConfig: ConfigureEvm
<Header = Header>
, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; type Output = BlockExecutionOutput; @@ -455,6 +460,94 @@ where } } +/// An executor that retains all cache state from execution in its bundle state. +#[derive(Debug)] +pub struct OpBlockAccessListExecutor { + /// The executor used to execute single blocks + /// + /// All state changes are committed to the [State]. + executor: OpBlockExecutor, +} + +impl Executor for OpBlockAccessListExecutor +where + EvmConfig: ConfigureEvm
<Header = Header>
, + DB: Database + Display>, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + /// Executes the block and commits the changes to the internal state. + /// + /// Returns the receipts of the transactions in the block. + /// + /// This also returns the accounts from the internal state cache in the bundle state, allowing + /// access to not only the state that changed during execution, but also the state accessed + /// during execution. + /// + /// Returns an error if the block could not be executed or failed verification. + fn execute(mut self, input: Self::Input<'_>) -> Result { + let BlockExecutionInput { block, total_difficulty, .. } = input; + let (receipts, gas_used) = + self.executor.execute_without_verification(block, total_difficulty)?; + + // NOTE: we need to merge keep the reverts for the bundle retention + self.executor.state.merge_transitions(BundleRetention::Reverts); + + // now, ensure each account from the state is included in the bundle state + let mut bundle_state = self.executor.state.take_bundle(); + for (address, account) in self.executor.state.cache.accounts { + // convert all slots, insert all slots + let account_info = account.account_info(); + let account_storage = account.account.map(|a| a.storage).unwrap_or_default(); + + match bundle_state.state.entry(address) { + Entry::Vacant(entry) => { + // we have to add the entire account here + let extracted_storage = account_storage + .into_iter() + .map(|(k, v)| { + (k, StorageSlot { previous_or_original_value: v, present_value: v }) + }) + .collect(); + + let bundle_account = BundleAccount { + info: account_info.clone(), + original_info: account_info, + storage: extracted_storage, + status: account.status, + }; + entry.insert(bundle_account); + } + Entry::Occupied(mut entry) => { + // only add slots that are unchanged + let current_account = entry.get_mut(); + + // iterate over all storage slots, checking keys that are not in the bundle + // state + for (k, v) in account_storage { + if let Entry::Vacant(storage_entry) = current_account.storage.entry(k) { + storage_entry.insert(StorageSlot { + previous_or_original_value: v, + present_value: v, + }); + } + } + } + } + } + + Ok(BlockExecutionOutput { + state: bundle_state, + receipts, + requests: vec![], + gas_used, + snapshot: None, + }) + } +} + /// An executor for a batch of blocks. /// /// State changes are tracked until the executor is finalized. @@ -473,16 +566,15 @@ impl OpBatchExecutor { } /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { + pub fn state_mut(&mut self) -> &mut State { self.executor.state_mut() } } impl BatchExecutor for OpBatchExecutor where - EvmConfig: ConfigureEvm, - DB: Database + std::fmt::Display>, + EvmConfig: ConfigureEvm
<Header = Header>
, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders, Header>; type Output = ExecutionOutcome; @@ -534,10 +626,11 @@ where #[cfg(test)] mod tests { use super::*; - use reth_chainspec::ChainSpecBuilder; + use crate::OpChainSpec; + use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use reth_chainspec::{ChainSpecBuilder, MIN_TRANSACTION_GAS}; use reth_primitives::{ - b256, Account, Address, Block, Signature, StorageKey, StorageValue, Transaction, - TransactionSigned, TxEip1559, BASE_MAINNET, + Account, Block, Signature, Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -572,7 +665,12 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { chain_spec, evm_config: Default::default() } + OpExecutorProvider { + evm_config: OptimismEvmConfig::new(Arc::new(OpChainSpec { + inner: (*chain_spec).clone(), + })), + chain_spec, + } } #[test] @@ -604,7 +702,7 @@ mod tests { Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, to: addr.into(), ..Default::default() }), @@ -615,7 +713,7 @@ mod tests { Transaction::Deposit(reth_primitives::TxDeposit { from: addr, to: addr.into(), - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, ..Default::default() }), Signature::default(), @@ -690,7 +788,7 @@ mod tests { Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, to: addr.into(), ..Default::default() }), @@ -701,7 +799,7 @@ mod tests { Transaction::Deposit(reth_primitives::TxDeposit { from: addr, to: addr.into(), - gas_limit: 21_000, + gas_limit: MIN_TRANSACTION_GAS as u128, ..Default::default() }), Signature::optimism_deposit_tx_signature(), diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index ecd847edaa..07d03f8a9a 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,9 +1,10 @@ //! Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; +use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{address, b256, hex, Address, Block, Bytes, B256, U256}; +use reth_primitives::Block; use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, @@ -43,13 +44,20 @@ pub fn extract_l1_info(block: &Block) -> Result Result { // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a // Bedrock hardfork L1BlockInfo transaction. 
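// (For orientation: `L1_BLOCK_ECOTONE_SELECTOR` is the 4-byte selector of the
// L1 block contract's `setL1BlockValuesEcotone()` call, `0x440a5e20`; it is
// visible in the Ecotone and Fjord calldata fixtures in the tests below.)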
- if l1_info_tx_data[0..4] == L1_BLOCK_ECOTONE_SELECTOR { - parse_l1_info_tx_ecotone(l1_info_tx_data[4..].as_ref()) + if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { + parse_l1_info_tx_ecotone(input[4..].as_ref()) } else { - parse_l1_info_tx_bedrock(l1_info_tx_data[4..].as_ref()) + parse_l1_info_tx_bedrock(input[4..].as_ref()) } } @@ -95,21 +103,20 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { return Err(OptimismBlockExecutionError::L1BlockInfoError { @@ -117,14 +124,29 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result + // 4 uint32 _basefeeScalar (start offset in this scope) + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + + let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]).ok_or_else(|| { OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee scalar".to_string(), + message: "could not convert l1 base fee scalar".to_string(), } })?; - let l1_base_fee_scalar = U256::try_from_be_slice(&data[12..16]).ok_or_else(|| { + let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or_else(|| { OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 base fee scalar".to_string(), + message: "could not convert l1 blob base fee scalar".to_string(), } })?; let l1_base_fee = U256::try_from_be_slice(&data[32..64]).ok_or_else(|| { @@ -274,11 +296,16 @@ where #[cfg(test)] mod tests { + use reth_chainspec::OptimismHardforks; + use reth_optimism_chainspec::OP_MAINNET; + use reth_primitives::TransactionSigned; + use super::*; #[test] fn sanity_l1_block() { - use reth_primitives::{hex_literal::hex, Bytes, Header, TransactionSigned}; + use alloy_primitives::{hex_literal::hex, Bytes}; + use reth_primitives::{Header, TransactionSigned}; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let l1_info_tx = TransactionSigned::decode_enveloped(&mut bytes.as_ref()).unwrap(); @@ -301,24 +328,66 @@ mod tests { #[test] fn sanity_l1_block_ecotone() { - use reth_primitives::{hex_literal::hex, Bytes, Header, TransactionSigned}; - - let bytes = Bytes::from_static(&hex!("7ef8f8a0b84fa363879a2159e341c50a32da3ea0d21765b7bd43db37f2e5e04e8848b1ee94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f42400000000000000000000000040000000065c41f680000000000a03f6b00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000535f4d983dea59eac60478a64ecfdcde8571e611404295350de7ed4ccb404296c1a84ab7a00000000000000000000000073b4168cc87f35cc239200a20eb841cded23493b")); - let 
l1_info_tx = TransactionSigned::decode_enveloped(&mut bytes.as_ref()).unwrap(); - let mock_block = Block { - header: Header::default(), - body: vec![l1_info_tx], - ommers: Vec::default(), - withdrawals: None, - sidecars: None, - requests: None, - }; + // rig + + // OP mainnet ecotone block 118024092 + // + const TIMESTAMP: u64 = 1711603765; + assert!(OP_MAINNET.is_ecotone_active_at_timestamp(TIMESTAMP)); + + // First transaction in OP mainnet block 118024092 + // + // https://optimistic.etherscan.io/getRawTx?tx=0x88501da5d5ca990347c2193be90a07037af1e3820bb40774c8154871c7669150 + const TX: [u8; 251] = hex!("7ef8f8a0a539eb753df3b13b7e386e147d45822b67cb908c9ddc5618e3dbaa22ed00850b94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e2000000558000c5fc50000000000000000000000006605a89f00000000012a10d90000000000000000000000000000000000000000000000000000000af39ac3270000000000000000000000000000000000000000000000000000000d5ea528d24e582fa68786f080069bdbfe06a43f8e67bfd31b8e4d8a8837ba41da9a82a54a0000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); + + let tx = TransactionSigned::decode_enveloped(&mut TX.as_slice()).unwrap(); + let block = Block { body: vec![tx], ..Default::default() }; + + // expected l1 block info + let expected_l1_base_fee = U256::from_be_bytes(hex!( + "0000000000000000000000000000000000000000000000000000000af39ac327" // 47036678951 + )); + let expected_l1_base_fee_scalar = U256::from(1368); + let expected_l1_blob_base_fee = U256::from_be_bytes(hex!( + "0000000000000000000000000000000000000000000000000000000d5ea528d2" // 57422457042 + )); + let expecte_l1_blob_base_fee_scalar = U256::from(810949); + + // test + + let l1_block_info: L1BlockInfo = extract_l1_info(&block).unwrap(); + + assert_eq!(l1_block_info.l1_base_fee, expected_l1_base_fee); + assert_eq!(l1_block_info.l1_base_fee_scalar, expected_l1_base_fee_scalar); + assert_eq!(l1_block_info.l1_blob_base_fee, Some(expected_l1_blob_base_fee)); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, Some(expecte_l1_blob_base_fee_scalar)); + } - let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); - assert_eq!(l1_info.l1_base_fee, U256::from(8)); - assert_eq!(l1_info.l1_base_fee_scalar, U256::from(4)); - assert_eq!(l1_info.l1_blob_base_fee, Some(U256::from(22_380_075_395u64))); - assert_eq!(l1_info.l1_blob_base_fee_scalar, Some(U256::from(0))); - assert_eq!(l1_info.l1_fee_overhead, None); + #[test] + fn parse_l1_info_fjord() { + // rig + + // L1 block info for OP mainnet block 124665056 (stored in input of tx at index 0) + // + // https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1 + const DATA: &[u8] = &hex!("440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); + + // expected l1 block info verified against expected l1 fee for tx. 
l1 tx fee listed on OP + // mainnet block scanner + // + // https://github.com/bluealloy/revm/blob/fa5650ee8a4d802f4f3557014dd157adfb074460/crates/revm/src/optimism/l1block.rs#L414-L443 + let l1_base_fee = U256::from(1055991687); + let l1_base_fee_scalar = U256::from(5227); + let l1_blob_base_fee = Some(U256::from(1)); + let l1_blob_base_fee_scalar = Some(U256::from(1014213)); + + // test + + let l1_block_info = parse_l1_info(DATA).unwrap(); + + assert_eq!(l1_block_info.l1_base_fee, l1_base_fee); + assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar); + assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar); } } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f55616df26..14cb4dc3ac 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -9,14 +9,16 @@ // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] -use reth_chainspec::ChainSpec; -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use alloy_primitives::{Address, U256}; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; +use reth_optimism_chainspec::OpChainSpec; use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, transaction::FillTxEnv, - Address, Head, Header, TransactionSigned, U256, + Head, Header, TransactionSigned, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; @@ -27,14 +29,31 @@ pub use l1::*; mod error; pub use error::OptimismBlockExecutionError; -use revm_primitives::{Bytes, Env, OptimismFields, TxKind}; +use revm_primitives::{ + BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId, TxKind, +}; /// Optimism-related EVM configuration. -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct OptimismEvmConfig; +#[derive(Debug, Clone)] +pub struct OptimismEvmConfig { + chain_spec: Arc, +} + +impl OptimismEvmConfig { + /// Creates a new [`OptimismEvmConfig`] with the given chain spec. + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } + } + + /// Returns the chain spec associated with this configuration. 
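+    ///
+    /// (For a construction sketch see `test_evm_config()` in the tests below,
+    /// which builds one via `OptimismEvmConfig::new(BASE_MAINNET.clone())`.)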
+ pub fn chain_spec(&self) -> &OpChainSpec { + &self.chain_spec + } +} impl ConfigureEvmEnv for OptimismEvmConfig { + type Header = Header; + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); } @@ -87,12 +106,11 @@ impl ConfigureEvmEnv for OptimismEvmConfig { fn fill_cfg_env( &self, cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, + header: &Self::Header, total_difficulty: U256, ) { let spec_id = revm_spec( - chain_spec, + self.chain_spec(), &Head { number: header.number, timestamp: header.timestamp, @@ -102,11 +120,66 @@ impl ConfigureEvmEnv for OptimismEvmConfig { }, ); - cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.chain_id = self.chain_spec.chain().id(); cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; cfg_env.handler_cfg.spec_id = spec_id; - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); + cfg_env.handler_cfg.is_optimism = self.chain_spec.is_optimism(); + } + + fn next_cfg_and_block_env( + &self, + parent: &Self::Header, + attributes: NextBlockEnvAttributes, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + // configure evm env based on parent block + let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); + + // ensure we're not missing any timestamp based hardforks + let spec_id = revm_spec_by_timestamp_after_bedrock(&self.chain_spec, attributes.timestamp); + + // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is + // cancun now, we need to set the excess blob gas to the default value + let blob_excess_gas_and_price = parent + .next_block_excess_blob_gas() + .or_else(|| { + if spec_id.is_enabled_in(SpecId::CANCUN) { + // default excess blob gas is zero + Some(0) + } else { + None + } + }) + .map(BlobExcessGasAndPrice::new); + + let block_env = BlockEnv { + number: U256::from(parent.number + 1), + coinbase: attributes.suggested_fee_recipient, + timestamp: U256::from(attributes.timestamp), + difficulty: U256::ZERO, + prevrandao: Some(attributes.prev_randao), + gas_limit: U256::from(parent.gas_limit), + // calculate basefee based on parent block's gas usage + basefee: U256::from( + parent + .next_block_base_fee( + self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), + ) + .unwrap_or_default(), + ), + // calculate excess gas based on parent block's blob gas usage + blob_excess_gas_and_price, + }; + + let cfg_with_handler_cfg; + { + cfg_with_handler_cfg = CfgEnvWithHandlerCfg { + cfg_env: cfg, + handler_cfg: HandlerCfg { spec_id, is_optimism: true }, + }; + } + + (cfg_with_handler_cfg, block_env) } } @@ -136,11 +209,12 @@ impl ConfigureEvm for OptimismEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec}; use reth_evm::execute::ProviderError; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Genesis, Header, B256, KECCAK_EMPTY, U256, + Genesis, Header, BASE_MAINNET, KECCAK_EMPTY, }; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, @@ -148,7 +222,11 @@ mod tests { JournaledState, }; use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; - use std::collections::HashSet; + use std::{collections::HashSet, sync::Arc}; + + fn test_evm_config() -> OptimismEvmConfig { + OptimismEvmConfig::new(BASE_MAINNET.clone()) + } #[test] fn test_fill_cfg_and_block_env() { @@ -176,13 +254,8 @@ mod tests { // Use the `OptimismEvmConfig` to fill the `cfg_env` and `block_env` based on the 
ChainSpec, // Header, and total difficulty - OptimismEvmConfig::default().fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &chain_spec, - &header, - total_difficulty, - ); + OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec @@ -192,7 +265,7 @@ mod tests { #[test] fn test_evm_configure() { // Create a default `OptimismEvmConfig` - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); // Initialize an empty database wrapped in CacheDB let db = CacheDB::>::default(); @@ -232,7 +305,7 @@ mod tests { #[test] fn test_evm_with_env_default_spec() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); @@ -252,7 +325,7 @@ mod tests { #[test] fn test_evm_with_env_custom_cfg() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); @@ -282,7 +355,7 @@ mod tests { #[test] fn test_evm_with_env_custom_block_and_tx() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); @@ -315,7 +388,7 @@ mod tests { #[test] fn test_evm_with_spec_id() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); @@ -334,7 +407,7 @@ mod tests { #[test] fn test_evm_with_inspector() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); @@ -378,7 +451,7 @@ mod tests { #[test] fn test_evm_with_env_and_default_inspector() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); let env_with_handler = EnvWithHandlerCfg::default(); @@ -397,7 +470,7 @@ mod tests { #[test] fn test_evm_with_env_inspector_and_custom_cfg() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); let cfg = CfgEnv::default().with_chain_id(111); @@ -421,7 +494,7 @@ mod tests { #[test] fn test_evm_with_env_inspector_and_custom_block_tx() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); // Create custom block and tx environment @@ -452,7 +525,7 @@ mod tests { #[test] fn test_evm_with_env_inspector_and_spec_id() { - let evm_config = OptimismEvmConfig::default(); + let evm_config = test_evm_config(); let db = CacheDB::>::default(); let handler_cfg = HandlerCfg { spec_id: SpecId::ECOTONE, ..Default::default() }; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index c5c0b53625..6c07856186 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -39,6 +39,9 @@ reth-optimism-rpc.workspace = true reth-rpc.workspace = true reth-optimism-chainspec.workspace = true +# ethereum +alloy-primitives.workspace = true + # async async-trait.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } @@ -85,4 +88,5 @@ opbnb = [ "reth-evm-optimism/opbnb", "reth-optimism-payload-builder/opbnb", ] -test-utils = ["reth-node-builder/test-utils"] \ No newline at end of file +asm-keccak = ["reth-primitives/asm-keccak"] +test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/node/src/args.rs 
b/crates/optimism/node/src/args.rs index 30f309656c..54be83dc51 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -2,8 +2,12 @@ //! clap [Args](clap::Args) for optimism rollup configuration +use reth_node_builder::engine_tree_config::{ + DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, +}; + /// Parameters for rollup configuration -#[derive(Debug, Clone, Default, PartialEq, Eq, clap::Args)] +#[derive(Debug, Clone, PartialEq, Eq, clap::Args)] #[command(next_help_heading = "Rollup")] pub struct RollupArgs { /// HTTP endpoint for the sequencer mempool @@ -37,6 +41,29 @@ pub struct RollupArgs { /// Enable the engine2 experimental features on op-reth binary #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + + /// Configure persistence threshold for engine experimental. + #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + pub persistence_threshold: u64, + + /// Configure the target number of blocks to keep in memory. + #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + pub memory_block_buffer_target: u64, +} + +impl Default for RollupArgs { + fn default() -> Self { + Self { + sequencer_http: None, + disable_txpool_gossip: false, + enable_genesis_walkback: false, + compute_pending_block: false, + discovery_v4: false, + experimental: false, + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + } + } } #[cfg(test)] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 823395d4ac..fec6d50654 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -13,12 +13,14 @@ use reth_node_builder::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, }, - node::{FullNodeTypes, NodeTypes}, + node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, BuilderContext, Node, PayloadBuilderConfig, }; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OptimismBeaconConsensus; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_primitives::Header; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -58,16 +60,15 @@ impl OptimismNode { OptimismConsensusBuilder, > where - Node: FullNodeTypes, + Node: FullNodeTypes< + Types: NodeTypesWithEngine, + >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; ComponentsBuilder::default() .node_types::() .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new( - compute_pending_block, - OptimismEvmConfig::default(), - )) + .payload(OptimismPayloadBuilder::new(compute_pending_block)) .network(OptimismNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, @@ -79,7 +80,9 @@ impl OptimismNode { impl Node for OptimismNode where - N: FullNodeTypes, + N: FullNodeTypes< + Types: NodeTypesWithEngine, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -100,10 +103,13 @@ where impl NodeTypes for OptimismNode { type Primitives = (); - type Engine = OptimismEngineTypes; type ChainSpec = ChainSpec; } +impl NodeTypesWithEngine for OptimismNode { + type Engine = OptimismEngineTypes; +} + /// Add-ons w.r.t. optimism. 
#[derive(Debug, Clone)] pub struct OptimismAddOns; @@ -119,7 +125,7 @@ pub struct OptimismExecutorBuilder; impl ExecutorBuilder for OptimismExecutorBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, { type EVM = OptimismEvmConfig; type Executor = OpExecutorProvider; @@ -129,8 +135,9 @@ where ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); - let evm_config = OptimismEvmConfig::default(); - let executor = OpExecutorProvider::new(chain_spec, evm_config); + let evm_config = + OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: (*chain_spec).clone() })); + let executor = OpExecutorProvider::new(chain_spec, evm_config.clone()); Ok((evm_config, executor)) } @@ -146,7 +153,7 @@ pub struct OptimismPoolBuilder; impl PoolBuilder for OptimismPoolBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, { type Pool = OpTransactionPool; @@ -218,7 +225,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OptimismPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -228,30 +235,30 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, - /// The EVM configuration to use for the payload builder. - pub evm_config: EVM, } -impl OptimismPayloadBuilder { - /// Create a new instance with the given `compute_pending_block` flag and evm config. - pub const fn new(compute_pending_block: bool, evm_config: EVM) -> Self { - Self { compute_pending_block, evm_config } +impl OptimismPayloadBuilder { + /// Create a new instance with the given `compute_pending_block` flag. + pub const fn new(compute_pending_block: bool) -> Self { + Self { compute_pending_block } } -} -impl PayloadServiceBuilder for OptimismPayloadBuilder -where - Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm, -{ - async fn spawn_payload_service( + /// A helper method to initialize [`PayloadBuilderService`] with the given EVM config. + pub fn spawn( self, + evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { + ) -> eyre::Result> + where + Node: FullNodeTypes< + Types: NodeTypesWithEngine, + >, + Pool: TransactionPool + Unpin + 'static, + Evm: ConfigureEvm
<Header = Header>
, + { let payload_builder = - reth_optimism_payload_builder::OptimismPayloadBuilder::new(self.evm_config) + reth_optimism_payload_builder::OptimismPayloadBuilder::new(evm_config) .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); @@ -279,6 +286,26 @@ where } } +impl PayloadServiceBuilder for OptimismPayloadBuilder +where + Node: FullNodeTypes< + Types: NodeTypesWithEngine, + >, + Pool: TransactionPool + Unpin + 'static, +{ + async fn spawn_payload_service( + self, + ctx: &BuilderContext, + pool: Pool, + ) -> eyre::Result> { + self.spawn( + OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: (*ctx.chain_spec()).clone() })), + ctx, + pool, + ) + } +} + /// A basic optimism network builder. #[derive(Debug, Default, Clone)] pub struct OptimismNetworkBuilder { @@ -290,7 +317,7 @@ pub struct OptimismNetworkBuilder { impl NetworkBuilder for OptimismNetworkBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( @@ -347,7 +374,7 @@ pub struct OptimismConsensusBuilder; impl ConsensusBuilder for OptimismConsensusBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>, { type Consensus = Arc; diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 743206f456..0ba4e79cc0 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -41,6 +41,11 @@ impl OpTransactionValidator { self.inner.chain_spec() } + /// Returns the configured client + pub fn client(&self) -> &Client { + self.inner.client() + } + /// Returns the current block timestamp. fn block_timestamp(&self) -> u64 { self.block_info.timestamp.load(Ordering::Relaxed) @@ -233,10 +238,11 @@ pub struct OpL1BlockInfo { #[cfg(test)] mod tests { use crate::txpool::OpTransactionValidator; + use alloy_primitives::{TxKind, U256}; + use reth::primitives::Signature; use reth_chainspec::MAINNET; use reth_primitives::{ - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, TxKind, - U256, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, }; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ @@ -261,7 +267,7 @@ mod tests { to: TxKind::Create, mint: None, value: U256::ZERO, - gas_limit: 0u64, + gas_limit: 0, is_system_transaction: false, input: Default::default(), }); diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 89933af63d..6b8e07a42e 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use alloy_genesis::Genesis; +use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; use reth_chainspec::ChainSpecBuilder; use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; @@ -9,7 +10,6 @@ use reth_node_optimism::{ }; use reth_optimism_chainspec::BASE_MAINNET; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, B256}; use tokio::sync::Mutex; /// Optimism Node Helper type diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index ab3292c2b5..e07bde715d 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -31,6 +31,7 @@ reth-chain-state.workspace = true # ethereum revm.workspace = true +alloy-primitives.workspace = true alloy-rlp.workspace = true # misc diff --git 
a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index a72f7fefe9..b1eba7be96 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -4,20 +4,29 @@ use crate::{ error::OptimismPayloadBuilderError, payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, }; +use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{EthereumHardforks, OptimismHardfork}; -use reth_evm::{system_calls::pre_block_beacon_root_contract_call, ConfigureEvm}; +use reth_evm::{ + system_calls::pre_block_beacon_root_contract_call, ConfigureEvm, ConfigureEvmEnv, + NextBlockEnvAttributes, +}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::error::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - constants::{BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS}, + constants::BEACON_NONCE, eip4844::calculate_excess_blob_gas, - proofs, Block, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, U256, + proofs, + revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, + Block, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; +use reth_transaction_pool::{ + noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, +}; use reth_trie::HashedPostState; use revm::{ db::states::bundle_state::BundleRetention, @@ -59,13 +68,32 @@ impl OptimismPayloadBuilder { self.compute_pending_block } } +impl OptimismPayloadBuilder +where + EvmConfig: ConfigureEvmEnv
, +{ + /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload + /// (that has the `parent` as its parent). + fn cfg_and_block_env( + &self, + config: &PayloadConfig, + parent: &Header, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + let next_attributes = NextBlockEnvAttributes { + timestamp: config.attributes.timestamp(), + suggested_fee_recipient: config.attributes.suggested_fee_recipient(), + prev_randao: config.attributes.prev_randao(), + }; + self.evm_config.next_cfg_and_block_env(parent, next_attributes) + } +} /// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. impl PayloadBuilder for OptimismPayloadBuilder where Client: StateProviderFactory, Pool: TransactionPool, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
, { type Attributes = OptimismPayloadBuilderAttributes; type BuiltPayload = OptimismBuiltPayload; @@ -74,7 +102,14 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - optimism_payload_builder(self.evm_config.clone(), args, self.compute_pending_block) + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + optimism_payload( + self.evm_config.clone(), + args, + cfg_env, + block_env, + self.compute_pending_block, + ) } fn on_missing_payload( @@ -86,144 +121,26 @@ where MissingPayloadBehaviour::AwaitInProgress } + // NOTE: this should only be used for testing purposes because this doesn't have access to L1 + // system txs, hence on_missing_payload we return [MissingPayloadBehaviour::AwaitInProgress]. fn build_empty_payload( &self, client: &Client, config: PayloadConfig, ) -> Result { - let extra_data = config.extra_data(); - let PayloadConfig { - initialized_block_env, - parent_block, - attributes, - chain_spec, - initialized_cfg, - .. - } = config; - - debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload"); - - let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload"); - err - })?; - let mut db = State::builder() - .with_database(StateProviderDatabase::new(state)) - .with_bundle_update() - .build(); - - let base_fee = initialized_block_env.basefee.to::(); - let block_gas_limit: u64 = - initialized_block_env.gas_limit.try_into().unwrap_or(chain_spec.max_gas_limit); - - // apply eip-4788 pre block contract call - pre_block_beacon_root_contract_call( - &mut db, - &self.evm_config, - &chain_spec, - &initialized_cfg, - &initialized_block_env, - attributes.payload_attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for empty payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &chain_spec, - attributes.payload_attributes.timestamp, - attributes.payload_attributes.withdrawals.clone(), - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to commit withdrawals for empty payload" - ); - err - })?; - - // merge all transitions into bundle state, this would apply the withdrawal balance - // changes and 4788 contract call - db.merge_transitions(BundleRetention::PlainState); - - // calculate the state root - let bundle_state = db.take_bundle(); - let hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); - let state_root = db.database.state_root(hashed_state).map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for empty payload" - ); - err - })?; - - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - if chain_spec.is_cancun_active_at_timestamp(attributes.payload_attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } else { - // for the 
first post-fork block, both parent.blob_gas_used and - // parent.excess_blob_gas are evaluated as 0 - Some(calculate_excess_blob_gas(0, 0)) - }; - - blob_gas_used = Some(0); - } - - let header = Header { - parent_hash: parent_block.hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root: EMPTY_TRANSACTIONS, - withdrawals_root, - receipts_root: EMPTY_RECEIPTS, - logs_bloom: Default::default(), - timestamp: attributes.payload_attributes.timestamp, - mix_hash: attributes.payload_attributes.prev_randao, - nonce: BEACON_NONCE, - base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: 0, - extra_data, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, - requests_root: None, + let args = BuildArguments { + client, + config, + // we use defaults here because for the empty payload we don't need to execute anything + pool: NoopTransactionPool::default(), + cached_reads: Default::default(), + cancel: Default::default(), + best_payload: None, }; - - let block = Block { - header, - body: vec![], - ommers: vec![], - withdrawals, - sidecars: None, - requests: None, - }; - let sealed_block = block.seal_slow(); - - Ok(OptimismBuiltPayload::new( - attributes.payload_attributes.payload_id(), - sealed_block, - U256::ZERO, - chain_spec, - attributes, - None, - )) + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + optimism_payload(self.evm_config.clone(), args, cfg_env, block_env, false)? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -236,13 +153,15 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] -pub(crate) fn optimism_payload_builder( +pub(crate) fn optimism_payload( evm_config: EvmConfig, args: BuildArguments, + initialized_cfg: CfgEnvWithHandlerCfg, + initialized_block_env: BlockEnv, _compute_pending_block: bool, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
, Client: StateProviderFactory, Pool: TransactionPool, { @@ -252,15 +171,7 @@ where let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let extra_data = config.extra_data(); - let PayloadConfig { - initialized_block_env, - initialized_cfg, - parent_block, - attributes, - chain_spec, - .. - } = config; + let PayloadConfig { parent_block, attributes, chain_spec, extra_data } = config; debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); @@ -520,10 +431,14 @@ where // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call - db.merge_transitions(BundleRetention::PlainState); + db.merge_transitions(BundleRetention::Reverts); - let execution_outcome = - ExecutionOutcome::new(db.take_bundle(), vec![receipts].into(), block_number, Vec::new()); + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![receipts.clone()].into(), + block_number, + Vec::new(), + ); let receipts_root = execution_outcome .optimism_receipts_root_slow( block_number, diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 38f5bb10b7..2016fdc6dd 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -13,7 +13,7 @@ pub enum OptimismPayloadBuilderError { L1BlockInfoParseFailed, /// Thrown when a database account could not be loaded. #[error("failed to load account {0}")] - AccountLoadFailed(reth_primitives::Address), + AccountLoadFailed(alloy_primitives::Address), /// Thrown when force deploy of create2deployer code fails. #[error("failed to force create2deployer account code")] ForceCreate2DeployerFail, diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 1746b66493..2acf875a56 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,17 +2,14 @@ //! Optimism builder support +use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_evm_optimism::revm_spec_by_timestamp_after_bedrock; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ - revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}, - transaction::WithEncoded, - Address, BlobTransactionSidecar, Header, SealedBlock, TransactionSigned, Withdrawals, B256, - U256, + transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, }; /// Re-export for use in downstream arguments. 
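// A minimal sketch of the `BundleRetention` switch made earlier in this hunk, with the
// revm types as imported in this diff: `PlainState` keeps only the final account and
// storage values, while `Reverts` additionally records per-transition revert data,
// which `ExecutionOutcome` consumers need in order to unwind blocks.
use revm::{db::states::bundle_state::BundleRetention, Database, State};

fn seal_block_state<DB: Database>(db: &mut State<DB>) {
    // was: db.merge_transitions(BundleRetention::PlainState);
    db.merge_transitions(BundleRetention::Reverts);
}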
pub use reth_rpc_types::optimism::OptimismPayloadAttributes; @@ -24,7 +21,6 @@ use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, convert_block_to_payload_field_v2, }; -use revm::primitives::HandlerCfg; use std::sync::Arc; /// Optimism Payload Builder Attributes @@ -106,59 +102,6 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { fn withdrawals(&self) -> &Withdrawals { &self.payload_attributes.withdrawals } - - fn cfg_and_block_env( - &self, - chain_spec: &ChainSpec, - parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { - // configure evm env based on parent block - let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); - - // ensure we're not missing any timestamp based hardforks - let spec_id = revm_spec_by_timestamp_after_bedrock(chain_spec, self.timestamp()); - - // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is - // cancun now, we need to set the excess blob gas to the default value - let blob_excess_gas_and_price = parent - .next_block_excess_blob_gas() - .or_else(|| { - if spec_id.is_enabled_in(SpecId::CANCUN) { - // default excess blob gas is zero - Some(0) - } else { - None - } - }) - .map(BlobExcessGasAndPrice::new); - - let block_env = BlockEnv { - number: U256::from(parent.number + 1), - coinbase: self.suggested_fee_recipient(), - timestamp: U256::from(self.timestamp()), - difficulty: U256::ZERO, - prevrandao: Some(self.prev_randao()), - gas_limit: U256::from(parent.gas_limit), - // calculate basefee based on parent block's gas usage - basefee: U256::from( - parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(self.timestamp())) - .unwrap_or_default(), - ), - // calculate excess gas based on parent block's blob gas usage - blob_excess_gas_and_price, - }; - - let cfg_with_handler_cfg; - { - cfg_with_handler_cfg = CfgEnvWithHandlerCfg { - cfg_env: cfg, - handler_cfg: HandlerCfg { spec_id, is_optimism: true }, - }; - } - - (cfg_with_handler_cfg, block_env) - } } /// Contains the built payload. diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 262cb58815..73a3bab1e4 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -10,3 +10,8 @@ description = "OP primitive types" [lints] workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-primitives-traits.workspace = true +alloy-primitives.workspace = true diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs new file mode 100644 index 0000000000..f63fc19179 --- /dev/null +++ b/crates/optimism/primitives/src/bedrock.rs @@ -0,0 +1,98 @@ +//! OP mainnet bedrock related data. 
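// The env derivation removed from `PayloadBuilderAttributes` above did not disappear;
// it moved behind the EVM config (see `cfg_and_block_env` in builder.rs earlier in
// this diff). A minimal caller-side sketch, assuming the trait shape shown there:
use reth_evm::{ConfigureEvmEnv, NextBlockEnvAttributes};
use reth_primitives::{
    revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg},
    Header,
};

fn next_env<Evm: ConfigureEvmEnv<Header = Header>>(
    evm_config: &Evm,
    parent: &Header,
    attrs: NextBlockEnvAttributes,
) -> (CfgEnvWithHandlerCfg, BlockEnv) {
    // Chain id, spec id (e.g. post-Cancun blob gas), base fee, and prev_randao are
    // all derived here rather than on the payload attributes type.
    evm_config.next_cfg_and_block_env(parent, attrs)
}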
+ +use alloy_primitives::{b256, bloom, bytes, B256, U256}; +use reth_primitives::{address, Header}; +use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; + +/// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, +/// replayed in blocks: +/// +/// 19 022 +/// 45 036 +pub const TX_BLOCK_985: [u64; 2] = [19_022, 45_036]; + +/// Transaction 0xc033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6 in block +/// 123 322, replayed in block: +/// +/// 123 542 +pub const TX_BLOCK_123_322: u64 = 123_542; + +/// Transaction 0x86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5 in block +/// 1 133 328, replayed in blocks: +/// +/// 1 135 391 +/// 1 144 468 +pub const TX_BLOCK_1_133_328: [u64; 2] = [1_135_391, 1_144_468]; + +/// Transaction 0x3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e in block +/// 1 244 152, replayed in block: +/// +/// 1 272 994 +pub const TX_BLOCK_1_244_152: u64 = 1_272_994; + +/// The six blocks with replayed transactions. +pub const BLOCK_NUMS_REPLAYED_TX: [u64; 6] = [ + TX_BLOCK_985[0], + TX_BLOCK_985[1], + TX_BLOCK_123_322, + TX_BLOCK_1_133_328[0], + TX_BLOCK_1_133_328[1], + TX_BLOCK_1_244_152, +]; + +/// Returns `true` if transaction is the second or third appearance of the transaction. The blocks +/// with replayed transaction happen to only contain the single transaction. +pub fn is_dup_tx(block_number: u64) -> bool { + if block_number > BLOCK_NUMS_REPLAYED_TX[5] { + return false + } + + // these blocks just have one transaction! + if BLOCK_NUMS_REPLAYED_TX.contains(&block_number) { + return true + } + + false +} + +/// Bedrock hash on Optimism Mainnet. +pub const BEDROCK_HEADER_HASH: B256 = + b256!("dbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"); + +/// Bedrock on Optimism Mainnet. (`105_235_063`) +pub const BEDROCK_HEADER: Header = Header { + difficulty: U256::ZERO, + extra_data: bytes!("424544524f434b"), + gas_limit: 30000000, + gas_used: 0, + logs_bloom: bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + nonce: 0, + number: 105235063, + parent_hash: b256!("21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50"), + receipts_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + state_root: b256!("920314c198da844a041d63bf6cbe8b59583165fd2229d1b3f599da812fd424cb"), + timestamp: 1686068903, + transactions_root: b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: address!("4200000000000000000000000000000000000011"), + withdrawals_root: None, + mix_hash: B256::ZERO, + base_fee_per_gas: Some(0x3b9aca00), + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_root: None, +}; + +/// Bedrock total difficulty on Optimism Mainnet. 
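// A usage sketch for `is_dup_tx` above, assuming the crate is importable as
// `reth_optimism_primitives`: a pre-Bedrock import loop skips the six replayed OVM
// transactions so their duplicate hashes are not stored twice.
use reth_optimism_primitives::bedrock::is_dup_tx;

fn main() {
    assert!(is_dup_tx(19_022)); // replay of the tx from block 985
    assert!(!is_dup_tx(19_023)); // ordinary block
    assert!(!is_dup_tx(2_000_000)); // past the last replay, early-returns false
}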
+pub const BEDROCK_HEADER_TTD: U256 = U256::ZERO; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_bedrock_header() { + assert_eq!(BEDROCK_HEADER.hash_slow(), BEDROCK_HEADER_HASH); + } +} diff --git a/crates/optimism/primitives/src/bedrock_import.rs b/crates/optimism/primitives/src/bedrock_import.rs deleted file mode 100644 index 17020f9f2b..0000000000 --- a/crates/optimism/primitives/src/bedrock_import.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Replayed OP mainnet OVM transactions (in blocks below Bedrock). - -/// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, -/// replayed in blocks: -/// -/// 19 022 -/// 45 036 -pub const TX_BLOCK_985: [u64; 2] = [19_022, 45_036]; - -/// Transaction 0xc033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6 in block -/// 123 322, replayed in block: -/// -/// 123 542 -pub const TX_BLOCK_123_322: u64 = 123_542; - -/// Transaction 0x86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5 in block -/// 1 133 328, replayed in blocks: -/// -/// 1 135 391 -/// 1 144 468 -pub const TX_BLOCK_1_133_328: [u64; 2] = [1_135_391, 1_144_468]; - -/// Transaction 0x3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e in block -/// 1 244 152, replayed in block: -/// -/// 1 272 994 -pub const TX_BLOCK_1_244_152: u64 = 1_272_994; - -/// The six blocks with replayed transactions. -pub const BLOCK_NUMS_REPLAYED_TX: [u64; 6] = [ - TX_BLOCK_985[0], - TX_BLOCK_985[1], - TX_BLOCK_123_322, - TX_BLOCK_1_133_328[0], - TX_BLOCK_1_133_328[1], - TX_BLOCK_1_244_152, -]; - -/// Returns `true` if transaction is the second or third appearance of the transaction. The blocks -/// with replayed transaction happen to only contain the single transaction. -pub fn is_dup_tx(block_number: u64) -> bool { - if block_number > BLOCK_NUMS_REPLAYED_TX[5] { - return false - } - - // these blocks just have one transaction! - if BLOCK_NUMS_REPLAYED_TX.contains(&block_number) { - return true - } - - false -} diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 5cdb53def7..659900b9ad 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -7,4 +7,4 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod bedrock_import; +pub mod bedrock; diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 8b1f643300..0f8c743f91 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -32,6 +32,8 @@ reth-chainspec.workspace = true # ethereum alloy-primitives.workspace = true op-alloy-network.workspace = true +op-alloy-rpc-types.workspace = true +op-alloy-consensus.workspace = true revm.workspace = true # async @@ -47,6 +49,9 @@ serde_json.workspace = true thiserror.workspace = true tracing.workspace = true +[dev-dependencies] +reth-optimism-chainspec.workspace = true + [features] optimism = [ "reth-evm-optimism/optimism", @@ -54,4 +59,10 @@ optimism = [ "reth-provider/optimism", "reth-rpc-eth-api/optimism", "revm/optimism", -] \ No newline at end of file +] + +opbnb = [ + "reth-evm-optimism/opbnb", + "reth-primitives/opbnb", + "revm/opbnb", +] diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 29a348ab74..2d9cd559c2 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,5 +1,7 @@ //! RPC errors specific to OP. 
+use jsonrpsee_types::error::INTERNAL_ERROR_CODE; +use reth_evm_optimism::OptimismBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; @@ -12,6 +14,9 @@ pub enum OpEthApiError { /// L1 ethereum error. #[error(transparent)] Eth(#[from] EthApiError), + /// EVM error originating from invalid optimism data. + #[error(transparent)] + Evm(#[from] OptimismBlockExecutionError), /// Thrown when calculating L1 gas fee. #[error("failed to calculate l1 gas fee")] L1BlockFeeError, @@ -20,7 +25,10 @@ pub enum OpEthApiError { L1BlockGasError, /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). #[error(transparent)] - InvalidTransaction(OptimismInvalidTransactionError), + InvalidTransaction(#[from] OptimismInvalidTransactionError), + /// Sequencer client error. + #[error(transparent)] + Sequencer(#[from] SequencerClientError), } impl AsEthApiError for OpEthApiError { @@ -36,10 +44,11 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(err: OpEthApiError) -> Self { match err { OpEthApiError::Eth(err) => err.into(), - OpEthApiError::L1BlockFeeError | OpEthApiError::L1BlockGasError => { - internal_rpc_err(err.to_string()) - } OpEthApiError::InvalidTransaction(err) => err.into(), + OpEthApiError::Evm(_) | + OpEthApiError::L1BlockFeeError | + OpEthApiError::L1BlockGasError => internal_rpc_err(err.to_string()), + OpEthApiError::Sequencer(err) => err.into(), } } } @@ -83,3 +92,24 @@ impl TryFrom for OptimismInvalidTransactionError { } } } + +/// Error type when interacting with the Sequencer +#[derive(Debug, thiserror::Error)] +pub enum SequencerClientError { + /// Wrapper around an [`reqwest::Error`]. + #[error(transparent)] + HttpError(#[from] reqwest::Error), + /// Thrown when serializing transaction to forward to sequencer + #[error("invalid sequencer transaction")] + InvalidSequencerTransaction, +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: SequencerClientError) -> Self { + jsonrpsee_types::error::ErrorObject::owned( + INTERNAL_ERROR_CODE, + err.to_string(), + None::, + ) + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index c7a7b45ab4..6f60297de8 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,6 +1,7 @@ -//! Loads and formats OP block RPC response. +//! Loads and formats OP block RPC response. 
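// A minimal sketch of what the `From<SequencerClientError>` conversion defined above
// produces: a JSON-RPC error object with code -32603 (`INTERNAL_ERROR_CODE`) and the
// error's `Display` output as the message. `SequencerClientError` is the type from
// this diff.
use jsonrpsee_types::error::ErrorObject;

fn to_rpc_error(err: SequencerClientError) -> ErrorObject<'static> {
    // yields code -32603 with e.g. "invalid sequencer transaction" as the message
    err.into()
}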
-use reth_node_api::FullNodeComponents; +use reth_chainspec::ChainSpec; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ @@ -13,13 +14,12 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::{EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, BlockId}; -use crate::{op_receipt_fields, OpEthApi, OpEthApiError}; +use crate::{OpEthApi, OpEthApiError}; impl EthBlocks for OpEthApi where - Self: LoadBlock + EthApiSpec + LoadTransaction, - Self::Error: From, - N: FullNodeComponents, + Self: EthApiSpec + LoadBlock + LoadTransaction, + N: FullNodeComponents>, { #[inline] fn provider(&self) -> impl HeaderProvider { @@ -41,14 +41,15 @@ where let timestamp = block.timestamp; let block = block.unseal(); - let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); + let l1_block_info = + reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; let receipts = block .body .into_iter() .zip(receipts.iter()) .enumerate() - .map(|(idx, (ref tx, receipt))| { + .map(|(idx, (ref tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { tx_hash: tx.hash, index: idx as u64, @@ -59,14 +60,13 @@ where timestamp, }; - let optimism_tx_meta = - self.build_op_tx_meta(tx, l1_block_info.clone(), timestamp)?; + let op_tx_meta = + self.build_op_receipt_meta(tx, l1_block_info.clone(), receipt)?; - ReceiptBuilder::new(tx, meta, receipt, &receipts) - .map(|builder| { - op_receipt_fields(builder, tx, receipt, optimism_tx_meta).build() - }) - .map_err(Self::Error::from_eth_err) + Ok(ReceiptBuilder::new(tx, meta, receipt, &receipts) + .map_err(Self::Error::from_eth_err)? + .add_other_fields(op_tx_meta.into()) + .build()) }) .collect::, Self::Error>>(); return receipts.map(Some) diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index fbcb403acc..dd9a4f2e81 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,8 +1,10 @@ +use alloy_primitives::{Bytes, TxKind, U256}; +use reth_chainspec::ChainSpec; use reth_evm::ConfigureEvm; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::{ revm_primitives::{BlockEnv, OptimismFields, TxEnv}, - Bytes, TxKind, U256, + Header, }; use reth_rpc_eth_api::{ helpers::{Call, EthCall, LoadState, SpawnBlocking}, @@ -16,7 +18,7 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where Self: Call, - N: FullNodeComponents, + N: FullNodeComponents>, { } @@ -32,7 +34,12 @@ where } #[inline] - fn evm_config(&self) -> &impl ConfigureEvm { + fn max_simulate_blocks(&self) -> u64 { + self.inner.max_simulate_blocks() + } + + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm
{ self.inner.evm_config() } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 8d6690b07e..109a0c8095 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -6,21 +6,22 @@ pub mod transaction; mod block; mod call; mod pending_block; -pub mod rpc; + +pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use std::{fmt, sync::Arc}; -use crate::eth::rpc::SequencerClient; use alloy_primitives::U256; use op_alloy_network::AnyNetwork; use reth_chainspec::ChainSpec; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{BuilderProvider, FullNodeComponents, FullNodeTypes}; +use reth_node_api::{BuilderProvider, FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; +use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, - StageCheckpointReader, StateProviderFactory, + BlockIdReader, BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, + HeaderProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -36,8 +37,9 @@ use reth_tasks::{ TaskSpawner, }; use reth_transaction_pool::TransactionPool; +use tokio::sync::OnceCell; -use crate::OpEthApiError; +use crate::{OpEthApiError, SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< @@ -57,12 +59,21 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. +#[derive(Clone)] pub struct OpEthApi { + /// Gateway to node's core components. inner: Arc>, - sequencer_client: parking_lot::RwLock>, + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: OnceCell, } -impl OpEthApi { +impl OpEthApi +where + N: FullNodeComponents< + Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + >, +{ /// Creates a new instance for given context. 
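// The `RwLock<Option<_>>` to `OnceCell` swap above buys set-once semantics: the
// sequencer client is configured at launch and only read afterwards, and `Clone` can
// now simply be derived. A standalone sketch of the tokio `OnceCell` behavior relied
// on (endpoint string is illustrative):
use tokio::sync::OnceCell;

fn main() {
    let cell: OnceCell<String> = OnceCell::new();
    assert!(cell.set("http://sequencer.example".to_string()).is_ok());
    assert!(cell.set("http://other.example".to_string()).is_err()); // second set rejected
    assert_eq!(cell.get().map(String::as_str), Some("http://sequencer.example"));
}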
#[allow(clippy::type_complexity)] pub fn with_spawner(ctx: &EthApiBuilderCtx) -> Self { @@ -76,6 +87,7 @@ impl OpEthApi { ctx.cache.clone(), ctx.new_gas_price_oracle(), ctx.config.rpc_gas_cap, + ctx.config.rpc_max_simulate_blocks, ctx.config.eth_proof_window, blocking_task_pool, ctx.new_fee_history_cache(), @@ -84,20 +96,7 @@ impl OpEthApi { ctx.config.proof_permits, ); - Self { inner: Arc::new(inner), sequencer_client: parking_lot::RwLock::new(None) } - } -} - -impl Clone for OpEthApi -where - N: FullNodeComponents, - Self: Send + Sync, -{ - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - sequencer_client: parking_lot::RwLock::new(self.sequencer_client.read().clone()), - } + Self { inner: Arc::new(inner), sequencer_client: OnceCell::new() } } } @@ -113,7 +112,7 @@ where impl EthApiSpec for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: FullNodeComponents>, { #[inline] fn provider( @@ -163,7 +162,7 @@ where impl LoadFee for OpEthApi where Self: LoadBlock, - N: FullNodeComponents, + N: FullNodeComponents>, { #[inline] fn provider( @@ -191,7 +190,7 @@ where impl LoadState for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: FullNodeComponents>, { #[inline] fn provider(&self) -> impl StateProviderFactory + ChainSpecProvider { @@ -233,12 +232,12 @@ where N: FullNodeComponents, { #[inline] - fn evm_config(&self) -> &impl ConfigureEvm { + fn evm_config(&self) -> &impl ConfigureEvm
<Header = Header> { self.inner.evm_config() } } -impl<N: FullNodeComponents> AddDevSigners for OpEthApi<N> { +impl<N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>> AddDevSigners for OpEthApi<N> { fn with_dev_accounts(&self) { *self.signers().write() = DevSigner::random_signers(20) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 513e04da95..43542baf52 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,10 +1,11 @@ -//! Loads OP pending block for a RPC response. +//! Loads the OP pending block for an RPC response. +use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::ChainSpec; use reth_evm::ConfigureEvm; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::{ - revm_primitives::BlockEnv, BlockNumber, Receipt, SealedBlockWithSenders, B256, + revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, @@ -22,7 +23,7 @@ use crate::OpEthApi; impl<N> LoadPendingBlock for OpEthApi<N> where Self: SpawnBlocking, - N: FullNodeComponents, + N: FullNodeComponents<Types: NodeTypes<ChainSpec = ChainSpec>>, { #[inline] fn provider( @@ -45,7 +46,7 @@ where } #[inline] - fn evm_config(&self) -> &impl ConfigureEvm { + fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ self.inner.evm_config() } @@ -58,19 +59,21 @@ where .provider() .latest_header() .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + let block_id = latest.hash().into(); let block = self .provider() - .block_with_senders(latest.hash().into(), Default::default()) + .block_with_senders(block_id, Default::default()) .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)? + .ok_or(EthApiError::HeaderNotFound(block_id.into()))? .seal(latest.hash()); let receipts = self .provider() - .receipts_by_block(block.hash().into()) + .receipts_by_block(block_id) .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; + Ok(Some((block, receipts))) } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index a97b1350c8..2cf4fc0a78 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,22 +1,27 @@ //! Loads and formats OP receipt RPC response. -use reth_chainspec::{ChainSpecProvider, OptimismHardforks}; -use reth_node_api::FullNodeComponents; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; +use op_alloy_rpc_types::{ + receipt::L1BlockInfo, OpTransactionReceipt, OptimismTransactionReceiptFields, +}; +use reth_chainspec::{ChainSpec, OptimismHardforks}; +use reth_evm_optimism::RethL1BlockInfo; +use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; +use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{ helpers::{EthApiSpec, LoadReceipt, LoadTransaction}, FromEthApiError, }; use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; -use reth_rpc_types::{optimism::OptimismTransactionReceiptFields, AnyTransactionReceipt}; +use reth_rpc_types::{AnyReceiptEnvelope, AnyTransactionReceipt, Log, TransactionReceipt}; -use crate::{OpEthApi, OpEthApiError, OptimismTxMeta}; +use crate::{OpEthApi, OpEthApiError}; impl LoadReceipt for OpEthApi where - Self: EthApiSpec + LoadTransaction, - Self::Error: From, - N: FullNodeComponents, + Self: EthApiSpec + LoadTransaction, + N: FullNodeComponents>, { #[inline] fn cache(&self) -> &EthStateCache { @@ -33,64 +38,378 @@ where .get_block_and_receipts(meta.block_hash) .await .map_err(Self::Error::from_eth_err)? 
- .ok_or(Self::Error::from_eth_err(EthApiError::UnknownBlockNumber))?; + .ok_or(Self::Error::from_eth_err(EthApiError::HeaderNotFound( + meta.block_hash.into(), + )))?; let block = block.unseal(); - let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); - let mut optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; + let l1_block_info = + reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; + let mut op_receipt_meta = self + .build_op_receipt_meta(&tx, l1_block_info, &receipt) + .map_err(OpEthApiError::from)?; if self.inner.provider().chain_spec().is_wright_active_at_timestamp(block.timestamp) && tx.effective_gas_price(meta.base_fee) == 0 { - optimism_tx_meta.l1_fee = Some(0); + op_receipt_meta.l1_block_info.l1_fee = Some(0); } - let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts) - .map_err(Self::Error::from_eth_err)?; - let resp_builder = op_receipt_fields(resp_builder, &tx, &receipt, optimism_tx_meta); + let receipt_resp = ReceiptBuilder::new(&tx, meta, &receipt, &receipts) + .map_err(Self::Error::from_eth_err)? + .add_other_fields(op_receipt_meta.into()) + .build(); - Ok(resp_builder.build()) + Ok(receipt_resp) + } +} + +impl OpEthApi +where + N: FullNodeComponents>, +{ + /// Builds a receipt w.r.t. chain spec. + pub fn build_op_receipt_meta( + &self, + tx: &TransactionSigned, + l1_block_info: revm::L1BlockInfo, + receipt: &Receipt, + ) -> Result { + Ok(OpReceiptFieldsBuilder::default() + .l1_block_info(&self.inner.provider().chain_spec(), tx, l1_block_info)? + .deposit_nonce(receipt.deposit_nonce) + .deposit_version(receipt.deposit_receipt_version) + .build()) } } -/// Applies OP specific fields to a receipt builder. -pub fn op_receipt_fields( - resp_builder: ReceiptBuilder, - tx: &TransactionSigned, - receipt: &Receipt, - optimism_tx_meta: OptimismTxMeta, -) -> ReceiptBuilder { - let mut op_fields = OptimismTransactionReceiptFields::default(); - - if tx.is_deposit() { - op_fields.deposit_nonce = receipt.deposit_nonce; - op_fields.deposit_receipt_version = receipt.deposit_receipt_version; - } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { - // always present - op_fields.l1_fee = optimism_tx_meta.l1_fee; - op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); - op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { - dg.saturating_add( - l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::(), - ) - }); - - // we know if we're __pre__ Ecotone by checking the l1 fee overhead value which is - // None if ecotone is active - if l1_block_info.l1_fee_overhead.is_some() { - // only pre Ecotone - op_fields.l1_fee_scalar = - Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); - } else { - // base fee scalar is enabled post Ecotone - op_fields.l1_base_fee_scalar = Some(l1_block_info.l1_base_fee_scalar.saturating_to()); +/// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a +/// deposit transaction. +#[derive(Debug, Default, Clone)] +pub struct OpReceiptFieldsBuilder { + /// Block timestamp. + pub l1_block_timestamp: u64, + /// The L1 fee for transaction. + pub l1_fee: Option, + /// L1 gas used by transaction. + pub l1_data_gas: Option, + /// L1 fee scalar. + pub l1_fee_scalar: Option, + /* ---------------------------------------- Bedrock ---------------------------------------- */ + /// The base fee of the L1 origin block. 
+ pub l1_base_fee: Option, + /* --------------------------------------- Regolith ---------------------------------------- */ + /// Deposit nonce, if this is a deposit transaction. + pub deposit_nonce: Option, + /* ---------------------------------------- Canyon ----------------------------------------- */ + /// Deposit receipt version, if this is a deposit transaction. + pub deposit_receipt_version: Option, + /* ---------------------------------------- Ecotone ---------------------------------------- */ + /// The current L1 fee scalar. + pub l1_base_fee_scalar: Option, + /// The current L1 blob base fee. + pub l1_blob_base_fee: Option, + /// The current L1 blob base fee scalar. + pub l1_blob_base_fee_scalar: Option, +} + +impl OpReceiptFieldsBuilder { + /// Returns a new builder. + pub fn new(block_timestamp: u64) -> Self { + Self { l1_block_timestamp: block_timestamp, ..Default::default() } + } + + /// Applies [`L1BlockInfo`](revm::L1BlockInfo). + pub fn l1_block_info( + mut self, + chain_spec: &ChainSpec, + tx: &TransactionSigned, + l1_block_info: revm::L1BlockInfo, + ) -> Result { + let raw_tx = tx.envelope_encoded(); + let timestamp = self.l1_block_timestamp; + + self.l1_fee = Some( + l1_block_info + .l1_tx_data_fee(chain_spec, timestamp, &raw_tx, tx.is_deposit()) + .map_err(|_| OpEthApiError::L1BlockFeeError)? + .saturating_to(), + ); + + self.l1_data_gas = Some( + l1_block_info + .l1_data_gas(chain_spec, timestamp, &raw_tx) + .map_err(|_| OpEthApiError::L1BlockGasError)? + .saturating_add(l1_block_info.l1_fee_overhead.unwrap_or_default()) + .saturating_to(), + ); + + self.l1_fee_scalar = (!chain_spec.hardforks.is_ecotone_active_at_timestamp(timestamp)) + .then_some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); + + self.l1_base_fee = Some(l1_block_info.l1_base_fee.saturating_to()); + self.l1_base_fee_scalar = Some(l1_block_info.l1_base_fee_scalar.saturating_to()); + self.l1_blob_base_fee = l1_block_info.l1_blob_base_fee.map(|fee| fee.saturating_to()); + self.l1_blob_base_fee_scalar = + l1_block_info.l1_blob_base_fee_scalar.map(|scalar| scalar.saturating_to()); + + Ok(self) + } + + /// Applies deposit transaction metadata: deposit nonce. + pub const fn deposit_nonce(mut self, nonce: Option) -> Self { + self.deposit_nonce = nonce; + self + } + + /// Applies deposit transaction metadata: deposit receipt version. + pub const fn deposit_version(mut self, version: Option) -> Self { + self.deposit_receipt_version = version; + self + } + + /// Builds the [`OptimismTransactionReceiptFields`] object. + pub const fn build(self) -> OptimismTransactionReceiptFields { + let Self { + l1_block_timestamp: _, // used to compute other fields + l1_fee, + l1_data_gas: l1_gas_used, + l1_fee_scalar, + l1_base_fee: l1_gas_price, + deposit_nonce, + deposit_receipt_version, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = self; + + OptimismTransactionReceiptFields { + l1_block_info: L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + }, + deposit_nonce, + deposit_receipt_version, } + } +} + +/// Builds an [`OpTransactionReceipt`]. +#[derive(Debug)] +pub struct OpReceiptBuilder { + /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. + pub core_receipt: TransactionReceipt>, + /// Transaction type. + pub tx_type: TxType, + /// Additional OP receipt fields. 
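// A usage sketch for `OpReceiptFieldsBuilder`, mirroring how `build_op_receipt_meta`
// above drives it: seed with the block timestamp, fold in revm's L1 block info, then
// the deposit metadata. All types are the ones defined or imported in this diff.
fn op_fields(
    chain_spec: &ChainSpec,
    tx: &TransactionSigned,
    receipt: &Receipt,
    block_timestamp: u64,
    l1_block_info: revm::L1BlockInfo,
) -> Result<OptimismTransactionReceiptFields, OpEthApiError> {
    Ok(OpReceiptFieldsBuilder::new(block_timestamp)
        .l1_block_info(chain_spec, tx, l1_block_info)?
        .deposit_nonce(receipt.deposit_nonce)
        .deposit_version(receipt.deposit_receipt_version)
        .build())
}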
+ pub op_receipt_fields: OptimismTransactionReceiptFields, +} + +impl OpReceiptBuilder { + /// Returns a new builder. + pub fn new( + chain_spec: &ChainSpec, + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + l1_block_info: revm::L1BlockInfo, + ) -> Result { + let ReceiptBuilder { base: core_receipt, .. } = + ReceiptBuilder::new(transaction, meta, receipt, all_receipts) + .map_err(OpEthApiError::Eth)?; + + let tx_type = transaction.tx_type(); + + let op_receipt_fields = OpReceiptFieldsBuilder::default() + .l1_block_info(chain_spec, transaction, l1_block_info)? + .deposit_nonce(receipt.deposit_nonce) + .deposit_version(receipt.deposit_receipt_version) + .build(); + + Ok(Self { core_receipt, tx_type, op_receipt_fields }) + } - // 4844 post Ecotone - op_fields.l1_blob_base_fee = l1_block_info.l1_blob_base_fee.map(|v| v.saturating_to()); - op_fields.l1_blob_base_fee_scalar = - l1_block_info.l1_blob_base_fee_scalar.map(|v| v.saturating_to()); + /// Builds [`OpTransactionReceipt`] by combing core (l1) receipt fields and additional OP + /// receipt fields. + pub fn build(self) -> OpTransactionReceipt { + let Self { core_receipt, tx_type, op_receipt_fields } = self; + + let OptimismTransactionReceiptFields { + l1_block_info, + deposit_nonce, + deposit_receipt_version, + } = op_receipt_fields; + + let TransactionReceipt { + inner: AnyReceiptEnvelope { inner: receipt_with_bloom, .. }, + transaction_hash, + transaction_index, + block_hash, + block_number, + gas_used, + effective_gas_price, + blob_gas_used, + blob_gas_price, + from, + to, + contract_address, + state_root, + authorization_list, + } = core_receipt; + + let inner = match tx_type { + TxType::Legacy => OpReceiptEnvelope::::Legacy(receipt_with_bloom), + TxType::Eip2930 => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), + TxType::Eip1559 => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), + TxType::Eip4844 => OpReceiptEnvelope::::Eip4844(receipt_with_bloom), + TxType::Eip7702 => { + unimplemented!("not implemented yet for OpReceiptEnvelope") + } + TxType::Deposit => { + OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { + receipt: OpDepositReceipt:: { + inner: receipt_with_bloom.receipt, + deposit_nonce, + deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) + } + }; + + let inner = TransactionReceipt::> { + inner, + transaction_hash, + transaction_index, + block_hash, + block_number, + gas_used, + effective_gas_price, + blob_gas_used, + blob_gas_price, + from, + to, + contract_address, + state_root, + authorization_list, + }; + + OpTransactionReceipt { inner, l1_block_info } } +} + +#[cfg(test)] +mod test { + use alloy_primitives::hex; + use reth_optimism_chainspec::OP_MAINNET; + use reth_primitives::Block; - resp_builder.add_other_fields(op_fields.into()) + use super::*; + + /// OP Mainnet transaction at index 0 in block 124665056. 
+ /// + /// + const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = hex!("7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); + + /// OP Mainnet transaction at index 1 in block 124665056. + /// + /// + const TX_1_OP_MAINNET_BLOCK_124665056: [u8; 1176] = hex!("02f904940a8303fba78401d6d2798401db2b6d830493e0943e6f4f7866654c18f536170780344aa8772950b680b904246a761202000000000000000000000000087000a300de7200382b55d40045000000e5d60e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000022482ad56cb0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000120000000000000000000000000dc6ff44d5d932cbd77b52e5612ba0529dc6226f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044095ea7b300000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c0000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000000000000000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024b6b55f250000000000000000000000000000000000000000000000049b9ca9a694340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000415ec214a3950bea839a7e6fbb0ba1540ac2076acd50820e2d5ef83d0902cdffb24a47aff7de5190290769c4f0a9c6fabf63012986a0d590b1b571547a8c7050ea1b00000000000000000000000000000000000000000000000000000000000000c080a06db770e6e25a617fe9652f0958bd9bd6e49281a53036906386ed39ec48eadf63a07f47cf51a4a40b4494cf26efc686709a9b03939e20ee27e59682f5faa536667e"); + + /// Timestamp of OP mainnet block 124665056. + /// + /// + const BLOCK_124665056_TIMESTAMP: u64 = 1724928889; + + /// L1 block info for transaction at index 1 in block 124665056. 
+ /// + /// + const TX_META_TX_1_OP_MAINNET_BLOCK_124665056: OptimismTransactionReceiptFields = + OptimismTransactionReceiptFields { + l1_block_info: L1BlockInfo { + l1_gas_price: Some(1055991687), // since bedrock l1 base fee + l1_gas_used: Some(4471), + l1_fee: Some(24681034813), + l1_fee_scalar: None, + l1_base_fee_scalar: Some(5227), + l1_blob_base_fee: Some(1), + l1_blob_base_fee_scalar: Some(1014213), + }, + deposit_nonce: None, + deposit_receipt_version: None, + }; + + #[test] + fn op_receipt_fields_from_block_and_tx() { + // rig + let tx_0 = TransactionSigned::decode_enveloped( + &mut TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056.as_slice(), + ) + .unwrap(); + + let tx_1 = + TransactionSigned::decode_enveloped(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) + .unwrap(); + + let block = Block { body: [tx_0, tx_1.clone()].to_vec(), ..Default::default() }; + + let l1_block_info = + reth_evm_optimism::extract_l1_info(&block).expect("should extract l1 info"); + + // test + assert!(OP_MAINNET.hardforks.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); + + let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP) + .l1_block_info(&OP_MAINNET, &tx_1, l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!( + l1_gas_price, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_price, + "incorrect l1 base fee (former gas price)" + ); + assert_eq!( + l1_gas_used, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_used, + "incorrect l1 gas used" + ); + assert_eq!( + l1_fee, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee, + "incorrect l1 fee" + ); + assert_eq!( + l1_fee_scalar, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee_scalar, + "incorrect l1 fee scalar" + ); + assert_eq!( + l1_base_fee_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_base_fee_scalar, + "incorrect l1 base fee scalar" + ); + assert_eq!( + l1_blob_base_fee, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee, + "incorrect l1 blob base fee" + ); + assert_eq!( + l1_blob_base_fee_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee_scalar, + "incorrect l1 blob base fee scalar" + ); + } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 40da80c96a..0a796151bf 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,20 +1,16 @@ //! Loads and formats OP transaction RPC response. 
use alloy_primitives::{Bytes, B256}; - -use reth_evm_optimism::RethL1BlockInfo; use reth_node_api::FullNodeComponents; -use reth_primitives::TransactionSigned; use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ - helpers::{EthApiSpec, EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - EthApiTypes, FromEthApiError, + helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, + FromEthApiError, }; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthStateCache}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use revm::L1BlockInfo; -use crate::{eth::rpc::SequencerClient, OpEthApi, OpEthApiError}; +use crate::{OpEthApi, SequencerClient}; impl EthTransactions for OpEthApi where @@ -76,78 +72,20 @@ where } } -/// L1 fee and data gas for a transaction, along with the L1 block info. -#[derive(Debug, Default, Clone)] -pub struct OptimismTxMeta { - /// The L1 block info. - pub l1_block_info: Option, - /// The L1 fee for the block. - pub l1_fee: Option, - /// The L1 data gas for the block. - pub l1_data_gas: Option, -} - -impl OptimismTxMeta { - /// Creates a new [`OptimismTxMeta`]. - pub const fn new( - l1_block_info: Option, - l1_fee: Option, - l1_data_gas: Option, - ) -> Self { - Self { l1_block_info, l1_fee, l1_data_gas } - } -} - impl OpEthApi where - Self: EthApiSpec + LoadTransaction, - ::Error: From, N: FullNodeComponents, { - /// Builds [`OptimismTxMeta`] object using the provided [`TransactionSigned`], L1 block - /// info and block timestamp. The [`L1BlockInfo`] is used to calculate the l1 fee and l1 data - /// gas for the transaction. If the [`L1BlockInfo`] is not provided, the meta info will be - /// empty. - pub fn build_op_tx_meta( + /// Sets a [`SequencerClient`] for `eth_sendRawTransaction` to forward transactions to. + pub fn set_sequencer_client( &self, - tx: &TransactionSigned, - l1_block_info: Option, - block_timestamp: u64, - ) -> Result::Error> { - let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; - - let (l1_fee, l1_data_gas) = if tx.is_deposit() { - (None, None) - } else { - let envelope_buf = tx.envelope_encoded(); - - let inner_l1_fee = l1_block_info - .l1_tx_data_fee(&self.chain_spec(), block_timestamp, &envelope_buf, tx.is_deposit()) - .map_err(|_| OpEthApiError::L1BlockFeeError)?; - let inner_l1_data_gas = l1_block_info - .l1_data_gas(&self.chain_spec(), block_timestamp, &envelope_buf) - .map_err(|_| OpEthApiError::L1BlockGasError)?; - ( - Some(inner_l1_fee.saturating_to::()), - Some(inner_l1_data_gas.saturating_to::()), - ) - }; - - Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) - } -} - -impl OpEthApi -where - N: FullNodeComponents, -{ - /// Sets a `SequencerClient` for `eth_sendRawTransaction` to forward transactions to. - pub fn set_sequencer_client(&self, sequencer_client: SequencerClient) { - *self.sequencer_client.write() = Some(sequencer_client); + sequencer_client: SequencerClient, + ) -> Result<(), tokio::sync::SetError> { + self.sequencer_client.set(sequencer_client) } - /// Returns the `SequencerClient` if one is set. + /// Returns the [`SequencerClient`] if one is set. 
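// A sketch of wiring the forwarder at node startup via the set-once API above; a
// second call would return `SetError`. `SequencerClient::new` is an assumption here,
// since the constructor is not shown in this diff.
use reth_node_api::FullNodeComponents;

fn enable_forwarding<N: FullNodeComponents>(
    api: &OpEthApi<N>,
    endpoint: &str,
) -> Result<(), tokio::sync::SetError<SequencerClient>> {
    api.set_sequencer_client(SequencerClient::new(endpoint))
}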
pub fn raw_tx_forwarder(&self) -> Option<SequencerClient> { - self.sequencer_client.read().clone() + self.sequencer_client.get().cloned() } } diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index e70194a899..e8b7340b5d 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,8 @@ pub mod error; pub mod eth; +pub mod sequencer; -pub use error::OpEthApiError; -pub use eth::{receipt::op_receipt_fields, transaction::OptimismTxMeta, OpEthApi}; +pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; +pub use eth::OpEthApi; +pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/eth/rpc.rs b/crates/optimism/rpc/src/sequencer.rs similarity index 63% rename from crates/optimism/rpc/src/eth/rpc.rs rename to crates/optimism/rpc/src/sequencer.rs index 02f227cbe9..43a23cb9e1 100644 --- a/crates/optimism/rpc/src/eth/rpc.rs +++ b/crates/optimism/rpc/src/sequencer.rs @@ -1,34 +1,16 @@ //! Helpers for optimism specific RPC implementations. -use std::sync::{atomic::AtomicUsize, Arc}; +use std::sync::{ + atomic::{self, AtomicUsize}, + Arc, +}; -use jsonrpsee_types::error::{ErrorObject, INTERNAL_ERROR_CODE}; +use alloy_primitives::hex; use reqwest::Client; -use reth_rpc_eth_types::error::EthApiError; -use reth_rpc_types::ToRpcError; +use serde_json::json; +use tracing::warn; -/// Error type when interacting with the Sequencer -#[derive(Debug, thiserror::Error)] -pub enum SequencerRpcError { - /// Wrapper around an [`reqwest::Error`]. - #[error(transparent)] - HttpError(#[from] reqwest::Error), - /// Thrown when serializing transaction to forward to sequencer - #[error("invalid sequencer transaction")] - InvalidSequencerTransaction, -} - -impl ToRpcError for SequencerRpcError { - fn to_rpc_error(&self) -> ErrorObject<'static> { - ErrorObject::owned(INTERNAL_ERROR_CODE, self.to_string(), None::<String>) - } -} - -impl From<SequencerRpcError> for EthApiError { - fn from(err: SequencerRpcError) -> Self { - Self::other(err) - } -} +use crate::SequencerClientError; /// A client to interact with a Sequencer #[derive(Debug, Clone)] @@ -65,23 +47,23 @@ impl SequencerClient { /// Returns the next id for the request fn next_request_id(&self) -> usize { - self.inner.id.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + self.inner.id.fetch_add(1, atomic::Ordering::SeqCst) } /// Forwards a transaction to the sequencer endpoint.
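// For reference, the body assembled by `forward_raw_transaction` below serializes to
//   {"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x<hex>"],"id":<n>}
// A standalone sketch of the same construction, with made-up transaction bytes:
use serde_json::json;

fn main() {
    let tx = [0x02u8, 0xf8, 0x6c];
    let body = json!({
        "jsonrpc": "2.0",
        "method": "eth_sendRawTransaction",
        "params": [format!("0x{}", alloy_primitives::hex::encode(tx))],
        "id": 1,
    });
    assert_eq!(body["params"][0], "0x02f86c");
}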
- pub async fn forward_raw_transaction(&self, tx: &[u8]) -> Result<(), SequencerRpcError> { - let body = serde_json::to_string(&serde_json::json!({ + pub async fn forward_raw_transaction(&self, tx: &[u8]) -> Result<(), SequencerClientError> { + let body = serde_json::to_string(&json!({ "jsonrpc": "2.0", "method": "eth_sendRawTransaction", - "params": [format!("0x{}", reth_primitives::hex::encode(tx))], + "params": [format!("0x{}", hex::encode(tx))], "id": self.next_request_id() })) .map_err(|_| { - tracing::warn!( + warn!( target = "rpc::eth", "Failed to serialize transaction for forwarding to sequencer" ); - SequencerRpcError::InvalidSequencerTransaction + SequencerClientError::InvalidSequencerTransaction })?; self.http_client() @@ -91,13 +73,12 @@ impl SequencerClient { .send() .await .inspect_err(|err| { - tracing::warn!( + warn!( target = "rpc::eth", %err, "Failed to forward transaction to sequencer", ); - }) - .map_err(SequencerRpcError::HttpError)?; + })?; Ok(()) } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 5df63fe4b7..b565e37fe3 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -27,10 +27,7 @@ use reth_provider::{ use reth_revm::state_change::post_block_withdrawals_balance_increments; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; -use revm::{ - primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Database, State, -}; +use revm::{Database, State}; use std::{ fmt, future::Future, @@ -673,10 +670,6 @@ impl Drop for Cancelled { /// Static config for how to build a payload. #[derive(Clone, Debug)] pub struct PayloadConfig { - /// Pre-configured block environment. - pub initialized_block_env: BlockEnv, - /// Configuration for the environment. - pub initialized_cfg: CfgEnvWithHandlerCfg, /// The parent block. pub parent_block: Arc, /// Block extra data. @@ -699,24 +692,13 @@ where Attributes: PayloadBuilderAttributes, { /// Create new payload config. - pub fn new( + pub const fn new( parent_block: Arc, extra_data: Bytes, attributes: Attributes, chain_spec: Arc, ) -> Self { - // configure evm env based on parent block - let (initialized_cfg, initialized_block_env) = - attributes.cfg_and_block_env(&chain_spec, &parent_block); - - Self { - initialized_block_env, - initialized_cfg, - parent_block, - extra_data, - attributes, - chain_spec, - } + Self { parent_block, extra_data, attributes, chain_spec } } /// Returns the payload id. @@ -746,6 +728,31 @@ pub enum BuildOutcome { Cancelled, } +impl BuildOutcome { + /// Consumes the type and returns the payload if the outcome is `Better`. + pub fn into_payload(self) -> Option { + match self { + Self::Better { payload, .. } => Some(payload), + _ => None, + } + } + + /// Returns true if the outcome is `Better`. + pub const fn is_better(&self) -> bool { + matches!(self, Self::Better { .. }) + } + + /// Returns true if the outcome is `Aborted`. + pub const fn is_aborted(&self) -> bool { + matches!(self, Self::Aborted { .. }) + } + + /// Returns true if the outcome is `Cancelled`. + pub const fn is_cancelled(&self) -> bool { + matches!(self, Self::Cancelled) + } +} + /// A collection of arguments used for building payloads. /// /// This struct encapsulates the essential components and configuration required for the payload @@ -756,6 +763,8 @@ pub struct BuildArguments { /// How to interact with the chain. pub client: Client, /// The transaction pool. + /// + /// Or the type that provides the transactions to build the payload. 
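> Reviewer note: for clarity on what `forward_raw_transaction` (earlier in this hunk) actually ships to the sequencer, this is the request body it serializes, extracted into a standalone sketch; `tx` is the raw signed transaction bytes and `id` comes from the atomic counter above:

```rust
use alloy_primitives::hex;
use serde_json::json;

/// Builds the JSON-RPC body POSTed to the sequencer endpoint.
fn request_body(tx: &[u8], id: usize) -> serde_json::Result<String> {
    serde_json::to_string(&json!({
        "jsonrpc": "2.0",
        "method": "eth_sendRawTransaction",
        "params": [format!("0x{}", hex::encode(tx))],
        "id": id,
    }))
}
```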
pub pool: Pool, /// Previously cached disk reads pub cached_reads: CachedReads, @@ -779,6 +788,18 @@ impl BuildArguments Self { Self { client, pool, cached_reads, config, cancel, best_payload } } + + /// Maps the transaction pool to a new type. + pub fn with_pool
<P>
(self, pool: P) -> BuildArguments { + BuildArguments { + client: self.client, + pool, + cached_reads: self.cached_reads, + config: self.config, + cancel: self.cancel, + best_payload: self.best_payload, + } + } } /// A trait for building payloads that encapsulate Ethereum transactions. diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 57e9365e0f..33e6021bf4 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -12,41 +12,41 @@ use tracing::debug; /// Payload builder events. #[derive(Clone, Debug)] -pub enum Events { +pub enum Events { /// The payload attributes as /// they are received from the CL through the engine api. - Attributes(Engine::PayloadBuilderAttributes), + Attributes(T::PayloadBuilderAttributes), /// The built payload that has been just built. /// Triggered by the CL whenever it asks for an execution payload. /// This event is only thrown if the CL is a validator. - BuiltPayload(Engine::BuiltPayload), + BuiltPayload(T::BuiltPayload), } /// Represents a receiver for various payload events. #[derive(Debug)] -pub struct PayloadEvents { +pub struct PayloadEvents { /// The receiver for the payload events. - pub receiver: broadcast::Receiver>, + pub receiver: broadcast::Receiver>, } -impl PayloadEvents { +impl PayloadEvents { /// Convert this receiver into a stream of `PayloadEvents`. - pub fn into_stream(self) -> BroadcastStream> { + pub fn into_stream(self) -> BroadcastStream> { BroadcastStream::new(self.receiver) } /// Asynchronously receives the next payload event. - pub async fn recv(self) -> Option, BroadcastStreamRecvError>> { + pub async fn recv(self) -> Option, BroadcastStreamRecvError>> { let mut event_stream = self.into_stream(); event_stream.next().await } /// Returns a new stream that yields all built payloads. - pub fn into_built_payload_stream(self) -> BuiltPayloadStream { + pub fn into_built_payload_stream(self) -> BuiltPayloadStream { BuiltPayloadStream { st: self.into_stream() } } /// Returns a new stream that yields received payload attributes - pub fn into_attributes_stream(self) -> PayloadAttributeStream { + pub fn into_attributes_stream(self) -> PayloadAttributeStream { PayloadAttributeStream { st: self.into_stream() } } } diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 2a29fe916e..fa78e912cf 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -65,7 +65,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO); +//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None); //! Ok(payload) //! } //! diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 91ab907321..3fe036cc1b 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -13,17 +13,17 @@ use tokio_stream::wrappers::UnboundedReceiverStream; /// A service task that does not build any payloads. #[derive(Debug)] -pub struct NoopPayloadBuilderService { +pub struct NoopPayloadBuilderService { /// Receiver half of the command channel. - command_rx: UnboundedReceiverStream>, + command_rx: UnboundedReceiverStream>, } -impl NoopPayloadBuilderService +impl NoopPayloadBuilderService where - Engine: PayloadTypes + 'static, + T: PayloadTypes + 'static, { /// Creates a new [`NoopPayloadBuilderService`]. 
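> Reviewer note: `with_pool` above swaps only the pool while carrying every other field across, and the new `BuildOutcome` helpers replace manual matching at call sites. A self-contained sketch of the helpers; the real `Better` variant also carries cached reads, elided here:

```rust
/// Simplified mirror of `BuildOutcome` with its new convenience methods.
#[derive(Debug, PartialEq)]
enum BuildOutcome<P> {
    Better { payload: P },
    Aborted,
    Cancelled,
}

impl<P> BuildOutcome<P> {
    /// Consumes the outcome, yielding the payload only if it improved.
    fn into_payload(self) -> Option<P> {
        match self {
            Self::Better { payload } => Some(payload),
            _ => None,
        }
    }

    const fn is_better(&self) -> bool {
        matches!(self, Self::Better { .. })
    }
}

fn main() {
    let outcome = BuildOutcome::Better { payload: 7u64 };
    assert!(outcome.is_better());
    assert_eq!(outcome.into_payload(), Some(7));
}
```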
- pub fn new() -> (Self, PayloadBuilderHandle) { + pub fn new() -> (Self, PayloadBuilderHandle) { let (service_tx, command_rx) = mpsc::unbounded_channel(); ( Self { command_rx: UnboundedReceiverStream::new(command_rx) }, @@ -32,9 +32,9 @@ where } } -impl Future for NoopPayloadBuilderService +impl Future for NoopPayloadBuilderService where - Engine: PayloadTypes, + T: PayloadTypes, { type Output = (); diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 8946cd5875..7f22cef990 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -31,15 +31,15 @@ type PayloadFuture
<P>
= Pin { - inner: PayloadBuilderHandle, +pub struct PayloadStore { + inner: PayloadBuilderHandle, } // === impl PayloadStore === -impl PayloadStore +impl PayloadStore where - Engine: PayloadTypes + 'static, + T: PayloadTypes + 'static, { /// Resolves the payload job and returns the best payload that has been built so far. /// @@ -48,7 +48,7 @@ where pub async fn resolve( &self, id: PayloadId, - ) -> Option> { + ) -> Option> { self.inner.resolve(id).await } @@ -58,7 +58,7 @@ where pub async fn best_payload( &self, id: PayloadId, - ) -> Option> { + ) -> Option> { self.inner.best_payload(id).await } @@ -68,25 +68,25 @@ where pub async fn payload_attributes( &self, id: PayloadId, - ) -> Option> { + ) -> Option> { self.inner.payload_attributes(id).await } } -impl Clone for PayloadStore +impl Clone for PayloadStore where - Engine: PayloadTypes, + T: PayloadTypes, { fn clone(&self) -> Self { Self { inner: self.inner.clone() } } } -impl From> for PayloadStore +impl From> for PayloadStore where - Engine: PayloadTypes, + T: PayloadTypes, { - fn from(inner: PayloadBuilderHandle) -> Self { + fn from(inner: PayloadBuilderHandle) -> Self { Self { inner } } } @@ -95,22 +95,22 @@ where /// /// This is the API used to create new payloads and to get the current state of existing ones. #[derive(Debug)] -pub struct PayloadBuilderHandle { +pub struct PayloadBuilderHandle { /// Sender half of the message channel to the [`PayloadBuilderService`]. - to_service: mpsc::UnboundedSender>, + to_service: mpsc::UnboundedSender>, } // === impl PayloadBuilderHandle === -impl PayloadBuilderHandle +impl PayloadBuilderHandle where - Engine: PayloadTypes + 'static, + T: PayloadTypes + 'static, { /// Creates a new payload builder handle for the given channel. /// /// Note: this is only used internally by the [`PayloadBuilderService`] to manage the payload /// building flow See [`PayloadBuilderService::poll`] for implementation details. - pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { + pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { Self { to_service } } @@ -118,10 +118,7 @@ where /// /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the /// job, See [`PayloadJob::resolve`]. - async fn resolve( - &self, - id: PayloadId, - ) -> Option> { + async fn resolve(&self, id: PayloadId) -> Option> { let (tx, rx) = oneshot::channel(); self.to_service.send(PayloadServiceCommand::Resolve(id, tx)).ok()?; match rx.await.transpose()? { @@ -130,13 +127,27 @@ where } } + /// Sends a message to the service to start building a new payload for the given payload + /// attributes and returns a future that resolves to the payload. + pub async fn send_and_resolve_payload( + &self, + attr: T::PayloadBuilderAttributes, + ) -> Result, PayloadBuilderError> { + let rx = self.send_new_payload(attr); + let id = rx.await??; + + let (tx, rx) = oneshot::channel(); + let _ = self.to_service.send(PayloadServiceCommand::Resolve(id, tx)); + rx.await?.ok_or(PayloadBuilderError::MissingPayload) + } + /// Returns the best payload for the given identifier. /// /// Note: this does not resolve the job if it's still in progress. pub async fn best_payload( &self, id: PayloadId, - ) -> Option> { + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_service.send(PayloadServiceCommand::BestPayload(id, tx)).ok()?; rx.await.ok()? 
@@ -148,7 +159,7 @@ where async fn payload_attributes( &self, id: PayloadId, - ) -> Option> { + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; rx.await.ok()? @@ -160,7 +171,7 @@ where /// and returns the receiver instead pub fn send_new_payload( &self, - attr: Engine::PayloadBuilderAttributes, + attr: T::PayloadBuilderAttributes, ) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); @@ -174,23 +185,23 @@ where /// Note: if there's already payload in progress with same identifier, it will be returned. pub async fn new_payload( &self, - attr: Engine::PayloadBuilderAttributes, + attr: T::PayloadBuilderAttributes, ) -> Result { self.send_new_payload(attr).await? } /// Sends a message to the service to subscribe to payload events. /// Returns a receiver that will receive them. - pub async fn subscribe(&self) -> Result, RecvError> { + pub async fn subscribe(&self) -> Result, RecvError> { let (tx, rx) = oneshot::channel(); let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx)); Ok(PayloadEvents { receiver: rx.await? }) } } -impl Clone for PayloadBuilderHandle +impl Clone for PayloadBuilderHandle where - Engine: PayloadTypes, + T: PayloadTypes, { fn clone(&self) -> Self { Self { to_service: self.to_service.clone() } @@ -207,38 +218,38 @@ where /// does know nothing about how to build them, it just drives their jobs to completion. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PayloadBuilderService +pub struct PayloadBuilderService where - Engine: PayloadTypes, + T: PayloadTypes, Gen: PayloadJobGenerator, - Gen::Job: PayloadJob, + Gen::Job: PayloadJob, { /// The type that knows how to create new payloads. generator: Gen, /// All active payload jobs. payload_jobs: Vec<(Gen::Job, PayloadId)>, /// Copy of the sender half, so new [`PayloadBuilderHandle`] can be created on demand. - service_tx: mpsc::UnboundedSender>, + service_tx: mpsc::UnboundedSender>, /// Receiver half of the command channel. - command_rx: UnboundedReceiverStream>, + command_rx: UnboundedReceiverStream>, /// Metrics for the payload builder service metrics: PayloadBuilderServiceMetrics, /// Chain events notification stream chain_events: St, /// Payload events handler, used to broadcast and subscribe to payload events. - payload_events: broadcast::Sender>, + payload_events: broadcast::Sender>, } const PAYLOAD_EVENTS_BUFFER_SIZE: usize = 20; // === impl PayloadBuilderService === -impl PayloadBuilderService +impl PayloadBuilderService where - Engine: PayloadTypes + 'static, + T: PayloadTypes + 'static, Gen: PayloadJobGenerator, - Gen::Job: PayloadJob, - ::BuiltPayload: Into, + Gen::Job: PayloadJob, + ::BuiltPayload: Into, { /// Creates a new payload builder service and returns the [`PayloadBuilderHandle`] to interact /// with it. @@ -246,7 +257,7 @@ where /// This also takes a stream of chain events that will be forwarded to the generator to apply /// additional logic when new state is committed. See also /// [`PayloadJobGenerator::on_new_state`]. - pub fn new(generator: Gen, chain_events: St) -> (Self, PayloadBuilderHandle) { + pub fn new(generator: Gen, chain_events: St) -> (Self, PayloadBuilderHandle) { let (service_tx, command_rx) = mpsc::unbounded_channel(); let (payload_events, _) = broadcast::channel(PAYLOAD_EVENTS_BUFFER_SIZE); @@ -265,7 +276,7 @@ where } /// Returns a handle to the service. 
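> Reviewer note: the new `send_and_resolve_payload` above chains two round-trips over the service's command channel. The underlying request/response shape, reduced to a sketch with placeholder types (`Command` mirrors `PayloadServiceCommand`; `u64` stands in for the payload and id types):

```rust
use tokio::sync::{mpsc, oneshot};

/// Placeholder mirroring `PayloadServiceCommand::Resolve`.
enum Command {
    Resolve(u64, oneshot::Sender<Option<u64>>),
}

/// The pattern used by `resolve`, `best_payload`, and
/// `send_and_resolve_payload`: ship a oneshot sender to the service task,
/// then await its reply.
async fn resolve(to_service: &mpsc::UnboundedSender<Command>, id: u64) -> Option<u64> {
    let (tx, rx) = oneshot::channel();
    to_service.send(Command::Resolve(id, tx)).ok()?;
    rx.await.ok()?
}
```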
- pub fn handle(&self) -> PayloadBuilderHandle { + pub fn handle(&self) -> PayloadBuilderHandle { PayloadBuilderHandle::new(self.service_tx.clone()) } @@ -275,10 +286,7 @@ where } /// Returns the best payload for the given identifier that has been built so far. - fn best_payload( - &self, - id: PayloadId, - ) -> Option> { + fn best_payload(&self, id: PayloadId) -> Option> { let res = self .payload_jobs .iter() @@ -293,7 +301,7 @@ where /// Returns the best payload for the given identifier that has been built so far and terminates /// the job if requested. - fn resolve(&mut self, id: PayloadId) -> Option> { + fn resolve(&mut self, id: PayloadId) -> Option> { trace!(%id, "resolving payload job"); let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?; @@ -324,12 +332,12 @@ where } } -impl PayloadBuilderService +impl PayloadBuilderService where - Engine: PayloadTypes, + T: PayloadTypes, Gen: PayloadJobGenerator, - Gen::Job: PayloadJob, - ::BuiltPayload: Into, + Gen::Job: PayloadJob, + ::BuiltPayload: Into, { /// Returns the payload attributes for the given payload. fn payload_attributes( @@ -350,14 +358,14 @@ where } } -impl Future for PayloadBuilderService +impl Future for PayloadBuilderService where - Engine: PayloadTypes + 'static, + T: PayloadTypes + 'static, Gen: PayloadJobGenerator + Unpin + 'static, ::Job: Unpin + 'static, St: Stream + Send + Unpin + 'static, - Gen::Job: PayloadJob, - ::BuiltPayload: Into, + Gen::Job: PayloadJob, + ::BuiltPayload: Into, { type Output = (); @@ -452,31 +460,28 @@ where } /// Message type for the [`PayloadBuilderService`]. -pub enum PayloadServiceCommand { +pub enum PayloadServiceCommand { /// Start building a new payload. BuildNewPayload( - Engine::PayloadBuilderAttributes, + T::PayloadBuilderAttributes, oneshot::Sender>, ), /// Get the best payload so far - BestPayload( - PayloadId, - oneshot::Sender>>, - ), + BestPayload(PayloadId, oneshot::Sender>>), /// Get the payload attributes for the given payload PayloadAttributes( PayloadId, - oneshot::Sender>>, + oneshot::Sender>>, ), /// Resolve the payload and return the payload - Resolve(PayloadId, oneshot::Sender>>), + Resolve(PayloadId, oneshot::Sender>>), /// Payload service events - Subscribe(oneshot::Sender>>), + Subscribe(oneshot::Sender>>), } -impl fmt::Debug for PayloadServiceCommand +impl fmt::Debug for PayloadServiceCommand where - Engine: PayloadTypes, + T: PayloadTypes, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 62f697ddd6..e4083d3ab3 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -15,16 +15,16 @@ use std::{ }; /// Creates a new [`PayloadBuilderService`] for testing purposes. -pub fn test_payload_service() -> ( +pub fn test_payload_service() -> ( PayloadBuilderService< TestPayloadJobGenerator, futures_util::stream::Empty, - Engine, + T, >, - PayloadBuilderHandle, + PayloadBuilderHandle, ) where - Engine: PayloadTypes< + T: PayloadTypes< PayloadBuilderAttributes = EthPayloadBuilderAttributes, BuiltPayload = EthBuiltPayload, > + 'static, @@ -33,9 +33,9 @@ where } /// Creates a new [`PayloadBuilderService`] for testing purposes and spawns it in the background. 
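> Reviewer note: a hypothetical smoke test under the renamed `T: PayloadTypes` spelling. The crate paths and `EthEngineTypes` (which should satisfy the `EthPayloadBuilderAttributes`/`EthBuiltPayload` bounds) are assumptions, as is the `test-utils` feature being enabled:

```rust
use reth_node_ethereum::EthEngineTypes;
use reth_payload_builder::test_utils::spawn_test_payload_service;

#[tokio::test]
async fn payload_service_smoke() {
    // Spawns the test generator in the background; only the handle is kept.
    let handle = spawn_test_payload_service::<EthEngineTypes>();
    // Subscribing exercises the broadcast channel set up by the service.
    let _events = handle.subscribe().await.expect("service task is alive");
}
```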
-pub fn spawn_test_payload_service() -> PayloadBuilderHandle +pub fn spawn_test_payload_service() -> PayloadBuilderHandle where - Engine: PayloadTypes< + T: PayloadTypes< PayloadBuilderAttributes = EthPayloadBuilderAttributes, BuiltPayload = EthBuiltPayload, > + 'static, @@ -82,7 +82,12 @@ impl PayloadJob for TestPayloadJob { type BuiltPayload = EthBuiltPayload; fn best_payload(&self) -> Result { - Ok(EthBuiltPayload::new(self.attr.payload_id(), Block::default().seal_slow(), U256::ZERO)) + Ok(EthBuiltPayload::new( + self.attr.payload_id(), + Block::default().seal_slow(), + U256::ZERO, + None, + )) } fn payload_attributes(&self) -> Result { diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 80ae38127e..5c231d1a6c 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,19 +1,15 @@ +use crate::{ + validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, +}; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; -use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Address, Header, SealedBlock, Withdrawals, B256, U256, -}; +use reth_primitives::{Address, SealedBlock, Withdrawals, B256, U256}; use reth_rpc_types::{ engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, optimism::OptimismPayloadAttributes, Withdrawal, }; -use crate::{ - validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, -}; - /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into /// engine API execution payloads. pub trait BuiltPayload: Send + Sync + std::fmt::Debug { @@ -70,21 +66,6 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Returns the withdrawals for the running payload job. fn withdrawals(&self) -> &Withdrawals; - - /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload - /// (that has the `parent` as its parent). - /// - /// The `chain_spec` is used to determine the correct chain id and hardfork for the payload - /// based on its timestamp. - /// - /// Block related settings are derived from the `parent` block and the configured attributes. - /// - /// NOTE: This is only intended for beacon consensus (after merge). - fn cfg_and_block_env( - &self, - chain_spec: &ChainSpec, - parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv); } /// The execution payload attribute type the CL node emits via the engine API. diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index b2834fcf28..063504b2a0 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -5,9 +5,24 @@ use byteorder::{BigEndian, ReadBytesExt}; use bytes::Buf; use derive_more::Deref; use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, JumpTable}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; use serde::{Deserialize, Serialize}; +/// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). +const LEGACY_RAW_BYTECODE_ID: u8 = 0; + +/// Identifier for removed bytecode variant. +const REMOVED_BYTECODE_ID: u8 = 1; + +/// Identifier for [`LegacyAnalyzed`](RevmBytecode::LegacyAnalyzed). +const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; + +/// Identifier for [`Eof`](RevmBytecode::Eof). 
+const EOF_BYTECODE_ID: u8 = 3; + +/// Identifier for [`Eip7702`](RevmBytecode::Eip7702). +const EIP7702_BYTECODE_ID: u8 = 4; + /// An Ethereum account. #[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] @@ -59,6 +74,14 @@ impl Bytecode { pub fn new_raw(bytes: Bytes) -> Self { Self(RevmBytecode::new_raw(bytes)) } + + /// Creates a new raw [`revm_primitives::Bytecode`]. + /// + /// Returns an error on incorrect Bytecode format. + #[inline] + pub fn new_raw_checked(bytecode: Bytes) -> Result { + RevmBytecode::new_raw_checked(bytecode).map(Self) + } } impl Compact for Bytecode { @@ -76,23 +99,23 @@ impl Compact for Bytecode { buf.put_slice(bytecode.as_ref()); let len = match &self.0 { RevmBytecode::LegacyRaw(_) => { - buf.put_u8(0); + buf.put_u8(LEGACY_RAW_BYTECODE_ID); 1 } - // `1` has been removed. + // [`REMOVED_BYTECODE_ID`] has been removed. RevmBytecode::LegacyAnalyzed(analyzed) => { - buf.put_u8(2); + buf.put_u8(LEGACY_ANALYZED_BYTECODE_ID); buf.put_u64(analyzed.original_len() as u64); let map = analyzed.jump_table().as_slice(); buf.put_slice(map); 1 + 8 + map.len() } RevmBytecode::Eof(_) => { - buf.put_u8(3); + buf.put_u8(EOF_BYTECODE_ID); 1 } RevmBytecode::Eip7702(_) => { - buf.put_u8(4); + buf.put_u8(EIP7702_BYTECODE_ID); 1 } }; @@ -108,16 +131,18 @@ impl Compact for Bytecode { let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { - 0 => Self(RevmBytecode::new_raw(bytes)), - 1 => unreachable!("Junk data in database: checked Bytecode variant was removed"), - 2 => Self(unsafe { + LEGACY_RAW_BYTECODE_ID => Self(RevmBytecode::new_raw(bytes)), + REMOVED_BYTECODE_ID => { + unreachable!("Junk data in database: checked Bytecode variant was removed") + } + LEGACY_ANALYZED_BYTECODE_ID => Self(unsafe { RevmBytecode::new_analyzed( bytes, buf.read_u64::().unwrap() as usize, JumpTable::from_slice(buf), ) }), - 3 | 4 => { + EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { // EOF and EIP-7702 bytecode objects will be decoded from the raw bytecode Self(RevmBytecode::new_raw(bytes)) } diff --git a/crates/primitives-traits/src/constants/gas_units.rs b/crates/primitives-traits/src/constants/gas_units.rs index ed287e6d29..312ae51cbc 100644 --- a/crates/primitives-traits/src/constants/gas_units.rs +++ b/crates/primitives-traits/src/constants/gas_units.rs @@ -1,7 +1,5 @@ -use core::time::Duration; - -#[cfg(not(feature = "std"))] use alloc::string::String; +use core::time::Duration; /// Represents one Kilogas, or `1_000` gas. pub const KILOGAS: u64 = 1_000; diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 9d6ace2e58..fb29114aa0 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -37,8 +37,6 @@ pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); pub const BEACON_NONCE: u64 = 0u64; /// The default Ethereum block gas limit. -// TODO: This should be a chain spec parameter. -/// See . pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; /// The minimum tx fee below which the txpool will reject the transaction. 
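> Reviewer note: the named variant ids above replace the bare `0`/`2`/`3`/`4` literals in `Bytecode`'s `Compact` impl (id `1` stays reserved for the removed variant). A simplified stand-in for the encode side, assuming only the two legacy variants and eliding the length prefix the real impl writes:

```rust
use bytes::BufMut;

const LEGACY_RAW_BYTECODE_ID: u8 = 0;
const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2;

/// Raw bytes first, then a one-byte variant id, then variant-specific
/// trailers (here just the original length for the analyzed case).
fn encode(raw: &[u8], analyzed_original_len: Option<u64>, buf: &mut Vec<u8>) {
    buf.put_slice(raw);
    match analyzed_original_len {
        None => buf.put_u8(LEGACY_RAW_BYTECODE_ID),
        Some(len) => {
            buf.put_u8(LEGACY_ANALYZED_BYTECODE_ID);
            buf.put_u64(len);
        }
    }
}
```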
@@ -141,6 +139,9 @@ pub const EMPTY_ROOT_HASH: B256 = /// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); +/// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` +pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); + /// Transactions root of empty receipts set. pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; diff --git a/crates/primitives-traits/src/error.rs b/crates/primitives-traits/src/error.rs index 144c210ba9..ef088a920e 100644 --- a/crates/primitives-traits/src/error.rs +++ b/crates/primitives-traits/src/error.rs @@ -1,13 +1,9 @@ +use alloc::boxed::Box; use core::{ fmt, ops::{Deref, DerefMut}, }; -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; -#[cfg(not(feature = "std"))] -extern crate alloc; - /// A pair of values, one of which is expected and one of which is actual. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GotExpected { diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index ecc1e018b4..7b91b2e5bc 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -532,3 +532,64 @@ impl<'a> arbitrary::Arbitrary<'a> for Header { )) } } + +/// Trait for extracting specific Ethereum block data from a header +pub trait BlockHeader { + /// Retrieves the beneficiary (miner) of the block + fn beneficiary(&self) -> Address; + + /// Retrieves the difficulty of the block + fn difficulty(&self) -> U256; + + /// Retrieves the block number + fn number(&self) -> BlockNumber; + + /// Retrieves the gas limit of the block + fn gas_limit(&self) -> u64; + + /// Retrieves the timestamp of the block + fn timestamp(&self) -> u64; + + /// Retrieves the mix hash of the block + fn mix_hash(&self) -> B256; + + /// Retrieves the base fee per gas of the block, if available + fn base_fee_per_gas(&self) -> Option; + + /// Retrieves the excess blob gas of the block, if available + fn excess_blob_gas(&self) -> Option; +} + +impl BlockHeader for Header { + fn beneficiary(&self) -> Address { + self.beneficiary + } + + fn difficulty(&self) -> U256 { + self.difficulty + } + + fn number(&self) -> BlockNumber { + self.number + } + + fn gas_limit(&self) -> u64 { + self.gas_limit + } + + fn timestamp(&self) -> u64 { + self.timestamp + } + + fn mix_hash(&self) -> B256 { + self.mix_hash + } + + fn base_fee_per_gas(&self) -> Option { + self.base_fee_per_gas + } + + fn excess_blob_gas(&self) -> Option { + self.excess_blob_gas + } +} diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index 982b779d4b..ef5c0d0253 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -20,31 +20,23 @@ pub const fn generate_valid_header( excess_blob_gas: u64, parent_beacon_block_root: B256, ) -> Header { - // EIP-1559 logic + // Clear all related fields if EIP-1559 is inactive if header.base_fee_per_gas.is_none() { - // If EIP-1559 is not active, clear related fields header.withdrawals_root = None; - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if header.withdrawals_root.is_none() { - // If EIP-4895 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - 
header.parent_beacon_block_root = None; - } else if eip_4844_active { - // Set fields based on EIP-4844 being active + } + + // Set fields based on EIP-4844 being active + if eip_4844_active { header.blob_gas_used = Some(blob_gas_used); header.excess_blob_gas = Some(excess_blob_gas); header.parent_beacon_block_root = Some(parent_beacon_block_root); } else { - // If EIP-4844 is not active, clear related fields header.blob_gas_used = None; header.excess_blob_gas = None; header.parent_beacon_block_root = None; } - // todo(onbjerg): adjust this for eip-7589 + // Placeholder for future EIP adjustments header.requests_root = None; header diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index df68d612aa..767fb3ec30 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -1,3 +1,4 @@ +use alloc::vec::Vec; use bytes::BufMut; use core::fmt; use derive_more::Deref; @@ -8,9 +9,6 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the /// capability to access its elements without decoding it. #[derive(Clone, PartialEq, Default, Deref)] @@ -140,7 +138,7 @@ impl<'a> Arbitrary<'a> for IntegerList { } /// Primitives error type. -#[derive(Debug, derive_more::Display)] +#[derive(Debug, derive_more::Display, derive_more::Error)] pub enum RoaringBitmapError { /// The provided input is invalid. #[display("the provided input is invalid")] @@ -150,9 +148,6 @@ pub enum RoaringBitmapError { FailedToDeserialize, } -#[cfg(feature = "std")] -impl std::error::Error for RoaringBitmapError {} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index b0ae9b8921..7bf46986b8 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; @@ -50,4 +49,4 @@ pub use blob_sidecar::{BlobSidecar, BlobSidecars}; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{Header, HeaderError, SealedHeader}; +pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; diff --git a/crates/primitives-traits/src/request.rs b/crates/primitives-traits/src/request.rs index 53e174d147..c08af3fd62 100644 --- a/crates/primitives-traits/src/request.rs +++ b/crates/primitives-traits/src/request.rs @@ -1,5 +1,6 @@ //! EIP-7685 requests. +use alloc::vec::Vec; pub use alloy_consensus::Request; use alloy_eips::eip7685::{Decodable7685, Encodable7685}; use alloy_rlp::{Decodable, Encodable}; @@ -8,9 +9,6 @@ use reth_codecs::{add_arbitrary_tests, Compact}; use revm_primitives::Bytes; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// A list of EIP-7685 requests. #[derive( Debug, diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 52e0495fc2..995e60292c 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,12 +1,10 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. 
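> Reviewer note: the `BlockHeader` trait introduced above (and re-exported from the crate root in this hunk) lets downstream code stay generic over header types. A minimal sketch of a consumer, assuming only the trait as added here:

```rust
use alloy_primitives::U256;
use reth_primitives_traits::BlockHeader;

/// Works for `Header` or any other implementor of the new trait.
fn is_post_merge<H: BlockHeader>(header: &H) -> bool {
    // Proof-of-stake headers carry zero difficulty; `mix_hash` then holds
    // the prevrandao value instead of PoW mix data.
    header.difficulty() == U256::ZERO
}
```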
+use alloc::vec::Vec; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; use reth_codecs::{add_arbitrary_tests, Compact}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Re-export from `alloy_eips`. #[doc(inline)] pub use alloy_eips::eip4895::Withdrawal; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index c2dc17aace..ff7bba7831 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -25,16 +25,19 @@ reth-optimism-chainspec = { workspace = true, optional = true } reth-bsc-chainspec = { workspace = true, optional = true } # ethereum +alloy-consensus.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } alloy-genesis.workspace = true alloy-eips = { workspace = true, features = ["serde"] } -alloy-consensus.workspace = true # optimism op-alloy-rpc-types = { workspace = true, optional = true } +op-alloy-consensus = { workspace = true, features = [ + "arbitrary", +], optional = true } # crypto secp256k1 = { workspace = true, features = [ @@ -53,8 +56,6 @@ modular-bitfield = { workspace = true, optional = true } rayon.workspace = true serde.workspace = true once_cell.workspace = true -tempfile = { workspace = true, optional = true } -thiserror = { workspace = true, optional = true } zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils @@ -65,6 +66,7 @@ proptest = { workspace = true, optional = true } alloy-chains = { workspace = true, optional = true } include_dir = { version = "0.7.4", optional = true } lazy_static = { version = "1.4.0", optional = true } +thiserror = { workspace = true, optional = true } [dev-dependencies] # eth @@ -92,7 +94,7 @@ pprof = { workspace = true, features = [ [features] default = ["c-kzg", "alloy-compat", "std", "reth-codec", "secp256k1"] -std = ["dep:thiserror", "reth-primitives-traits/std"] +std = ["reth-primitives-traits/std"] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ @@ -106,12 +108,25 @@ arbitrary = [ "reth-codec", ] secp256k1 = ["dep:secp256k1"] -c-kzg = ["dep:c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] +c-kzg = [ + "dep:c-kzg", + "revm-primitives/c-kzg", + "alloy-eips/kzg", + "alloy-consensus/kzg", +] optimism = [ "reth-chainspec/optimism", "reth-ethereum-forks/optimism", "revm-primitives/optimism", + "reth-codecs?/optimism", "dep:reth-optimism-chainspec", + "dep:op-alloy-consensus", +] +alloy-compat = [ + "reth-primitives-traits/alloy-compat", + "dep:alloy-rpc-types", + "dep:alloy-serde", + "dep:op-alloy-rpc-types", ] opbnb = [ "reth-chainspec/opbnb", @@ -128,11 +143,10 @@ bsc = [ "dep:include_dir", "dep:lazy_static", "dep:revm", - "dep:thiserror", "dep:reth-bsc-chainspec", + "dep:thiserror", ] -alloy-compat = ["reth-primitives-traits/alloy-compat", "dep:alloy-rpc-types", "dep:alloy-serde", "dep:op-alloy-rpc-types"] test-utils = ["reth-primitives-traits/test-utils"] [[bench]] @@ -143,4 +157,3 @@ harness = false name = "validate_blob_tx" required-features = ["arbitrary", "c-kzg"] harness = false - diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 6e614769cc..6e21cc92ef 100644 --- a/crates/primitives/src/alloy_compat.rs +++ 
b/crates/primitives/src/alloy_compat.rs @@ -5,14 +5,12 @@ use crate::{ TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, }; +use alloc::{string::ToString, vec::Vec}; use alloy_primitives::TxKind; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; use op_alloy_rpc_types as _; -#[cfg(not(feature = "std"))] -use alloc::{string::ToString, vec::Vec}; - impl TryFrom>> for Block { type Error = alloy_rpc_types::ConversionError; @@ -103,10 +101,7 @@ impl TryFrom> for Transaction { chain_id, nonce: tx.nonce, gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + gas_limit: tx.gas, to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, @@ -117,10 +112,7 @@ impl TryFrom> for Transaction { Ok(Self::Eip2930(TxEip2930 { chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, nonce: tx.nonce, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + gas_limit: tx.gas, to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, @@ -139,10 +131,7 @@ impl TryFrom> for Transaction { max_fee_per_gas: tx .max_fee_per_gas .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + gas_limit: tx.gas, to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -160,11 +149,7 @@ impl TryFrom> for Transaction { max_fee_per_gas: tx .max_fee_per_gas .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - placeholder: tx.to.map(drop), + gas_limit: tx.gas, to: tx.to.unwrap_or_default(), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -218,10 +203,7 @@ impl TryFrom> for Transaction { to: TxKind::from(tx.to), mint: fields.mint.filter(|n| *n != 0), value: tx.value, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + gas_limit: tx.gas, is_system_transaction: fields.is_system_tx.unwrap_or(false), input: tx.input, })) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 04579fd14f..a5771a22a1 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,6 +2,7 @@ use crate::{ Address, BlobSidecars, Bytes, GotExpected, Header, Requests, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; +use alloc::vec::Vec; pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; @@ -13,9 +14,6 @@ use proptest::prelude::prop_compose; pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - // HACK(onbjerg): we need this to always set `requests` to `None` since we might otherwise generate // a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the // requests as withdrawals @@ -823,7 +821,7 @@ mod tests { let bytes = 
hex!("f90288f90218a0fe21bb173f43067a9f90cfc59bbb6830a7a2929b5de4a61f372a9db28e87f9aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a061effbbcca94f0d3e02e5bd22e986ad57142acabf0cb3d129a6ad8d0f8752e94a0d911c25e97e27898680d242b7780b6faef30995c355a2d5de92e6b9a7212ad3aa0056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008003834c4b408252081e80a00000000000000000000000000000000000000000000000000000000000000000880000000000000000842806be9da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421f869f86702842806be9e82520894658bdf435d810c91414ec09147daa6db624063798203e880820a95a040ce7918eeb045ebf8c8b1887ca139d076bda00fa828a07881d442a72626c42da0156576a68e456e295e4c9cf67cf9f53151f329438916e0f24fc69d6bbb7fbacfc0c0"); let bytes_buf = &mut bytes.as_ref(); let block = Block::decode(bytes_buf).unwrap(); - let mut encoded_buf = Vec::new(); + let mut encoded_buf = Vec::with_capacity(bytes.len()); block.encode(&mut encoded_buf); assert_eq!(bytes[..], encoded_buf); } diff --git a/crates/primitives/src/compression/mod.rs b/crates/primitives/src/compression/mod.rs index f7af0acbe4..476f5d06b2 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/primitives/src/compression/mod.rs @@ -1,8 +1,7 @@ -use std::{cell::RefCell, thread_local}; -use zstd::bulk::{Compressor, Decompressor}; - -#[cfg(not(feature = "std"))] use alloc::vec::Vec; +use core::cell::RefCell; +use std::thread_local; +use zstd::bulk::{Compressor, Decompressor}; /// Compression/Decompression dictionary for `Receipt`. pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin"); diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 59bfd5e769..14e892adfb 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -1,45 +1,7 @@ //! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. -#[cfg(all(feature = "c-kzg", feature = "std"))] -pub use trusted_setup::*; pub use alloy_eips::eip4844::{ BLOB_GASPRICE_UPDATE_FRACTION, BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENT_BYTES, MAX_BLOBS_PER_BLOCK, MAX_DATA_GAS_PER_BLOCK, TARGET_BLOBS_PER_BLOCK, TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG, }; - -// These 2 to silence unused -#[cfg(all(feature = "c-kzg", not(feature = "std")))] -use tempfile as _; -#[cfg(all(not(feature = "c-kzg"), feature = "std"))] -use thiserror as _; - -#[cfg(all(feature = "c-kzg", feature = "std"))] -mod trusted_setup { - use crate::kzg::KzgSettings; - use std::io::Write; - - /// Loads the trusted setup parameters from the given bytes and returns the [`KzgSettings`]. - /// - /// This creates a temp file to store the bytes and then loads the [`KzgSettings`] from the file - /// via [`KzgSettings::load_trusted_setup_file`]. 
- pub fn load_trusted_setup_from_bytes( - bytes: &[u8], - ) -> Result { - let mut file = tempfile::NamedTempFile::new().map_err(LoadKzgSettingsError::TempFileErr)?; - file.write_all(bytes).map_err(LoadKzgSettingsError::TempFileErr)?; - KzgSettings::load_trusted_setup_file(file.path()).map_err(LoadKzgSettingsError::KzgError) - } - - /// Error type for loading the trusted setup. - #[derive(Debug, thiserror::Error)] - pub enum LoadKzgSettingsError { - /// Failed to create temp file to store bytes for loading [`KzgSettings`] via - /// [`KzgSettings::load_trusted_setup_file`]. - #[error("failed to setup temp file: {0}")] - TempFileErr(#[from] std::io::Error), - /// Kzg error - #[error("KZG error: {0:?}")] - KzgError(#[from] c_kzg::Error), - } -} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2469fddcfd..248c9c8907 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -19,7 +19,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; #[cfg(feature = "alloy-compat")] diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index ab57be8ffd..c7f34c2f2d 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -4,12 +4,10 @@ use crate::{ constants::EMPTY_OMMER_ROOT_HASH, keccak256, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, }; +use alloc::vec::Vec; use alloy_eips::eip7685::Encodable7685; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. @@ -37,39 +35,6 @@ pub fn calculate_requests_root(requests: &[Request]) -> B256 { ordered_trie_root_with_encoder(requests, |item, buf| item.encode_7685(buf)) } -/// Calculates the receipt root for a header. -#[cfg(feature = "optimism")] -pub fn calculate_receipt_root_optimism( - receipts: &[ReceiptWithBloom], - chain_spec: &reth_chainspec::ChainSpec, - timestamp: u64, -) -> B256 { - // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, - // the receipt root calculation does not include the deposit nonce in the receipt - // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the - // receipts before calculating the receipt root. This was corrected in the Canyon - // hardfork. - if chain_spec.is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Regolith, timestamp) && - !chain_spec - .is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Canyon, timestamp) - { - let receipts = receipts - .iter() - .cloned() - .map(|mut r| { - r.receipt.deposit_nonce = None; - r - }) - .collect::>(); - - return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| { - r.encode_inner(buf, false) - }) - } - - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - /// Calculates the receipt root for a header. pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) @@ -86,7 +51,7 @@ pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { /// Calculates the receipt root for a header for the reference type of [Receipt]. /// -/// NOTE: Prefer [`calculate_receipt_root_optimism`] if you have log blooms memoized. 
+/// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized. #[cfg(feature = "optimism")] pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&Receipt], @@ -136,14 +101,19 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { #[cfg(test)] mod tests { use super::*; - use crate::{bloom, constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, Log, TxType, U256}; + use crate::{constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, U256}; use alloy_genesis::GenesisAccount; - use alloy_primitives::{b256, Address, LogData}; + use alloy_primitives::{b256, Address}; use alloy_rlp::Decodable; use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed}; use std::collections::HashMap; + #[cfg(not(feature = "optimism"))] + use crate::TxType; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::{bloom, Log, LogData}; + #[test] fn check_transaction_root() { let data = &hex!("f90262f901f9a092230ce5476ae868e98c7979cfc165a93f8b6ad1922acf2df62e340916efd49da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa02307107a867056ca33b5087e77c4174f47625e48fb49f1c70ced34890ddd88f3a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba0c598f69a5674cae9337261b669970e24abc0b46e6d284372a239ec8ccbf20b0ab901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8618203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0"); @@ -154,297 +124,6 @@ mod tests { assert_eq!(block.transactions_root, tx_root, "Must be the same"); } - /// Tests that the receipt root is computed correctly for the regolith block. - /// This was implemented due to a minor bug in op-geth and op-erigon where in - /// the Regolith hardfork, the receipt root calculation does not include the - /// deposit nonce in the receipt encoding. - /// To fix this an op-reth patch was applied to the receipt root calculation - /// to strip the deposit nonce from each receipt before calculating the root. - #[cfg(feature = "optimism")] - #[test] - fn check_optimism_receipt_root() { - use crate::{Bloom, Bytes, BASE_SEPOLIA}; - - let cases = [ - // Deposit nonces didn't exist in Bedrock; No need to strip. For the purposes of this - // test, we do have them, so we should get the same root as Canyon. - ( - "bedrock", - 1679079599, - b256!("e255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"), - ), - // Deposit nonces introduced in Regolith. They weren't included in the receipt RLP, - // so we need to strip them - the receipt root will differ. - ( - "regolith", - 1679079600, - b256!("e255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"), - ), - // Receipt root hashing bug fixed in Canyon. 
Back to including the deposit nonce - // in the receipt RLP when computing the receipt root. - ( - "canyon", - 1699981200, - b256!("6eefbb5efb95235476654a8bfbf8cb64a4f5f0b0c80b700b0c5964550beee6d7"), - ), - ]; - - for case in cases { - let receipts = vec![ - // 0xb0d6ee650637911394396d81172bd1c637d568ed1fbddab0daddfca399c58b53 - ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Deposit, - success: true, - cumulative_gas_used: 46913, - logs: vec![], - #[cfg(feature = "optimism")] - deposit_nonce: Some(4012991u64), - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), - }, - // 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a - ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 118083, - logs: vec![ - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked( - vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")) - ) - }, - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked( - vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")) - ) - }, - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked( - vec![ - b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - b256!("000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"), - ], Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) - }, - ], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - bloom: 
Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), - }, - // 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266 - ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 189253, - logs: vec![ - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked(vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001"))) - }, - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked(vec![ - b256!("c3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001"))) - }, - Log { - address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(), - data: LogData::new_unchecked(vec![ - b256!("0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - b256!("0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"), - ], - Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003"))) - }, - ], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), - }, - // 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f - ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 346969, - logs: vec![ - Log { - address: hex!("4200000000000000000000000000000000000006").into(), - data: LogData::new_unchecked( vec![ - b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), - b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), - 
b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), - ], - Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000"))) - }, - Log { - address: hex!("cf8e7e6b26f407dee615fc4db18bf829e7aa8c09").into(), - data: LogData::new_unchecked( vec![ - b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), - b256!("0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"), - b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), - ], - Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) - }, - Log { - address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), - data: LogData::new_unchecked( vec![ - b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), - ], - Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf"))) - }, - Log { - address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(), - data: LogData::new_unchecked( vec![ - b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), - b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), - b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), - ], - Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2"))) - }, - Log { - address: hex!("6d0f8d488b669aa9ba2d0f0b7b75a88bf5051cd3").into(), - data: LogData::new_unchecked( vec![ - b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), - b256!("0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"), - b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093"))) - }, - Log { - address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), - data: LogData::new_unchecked( vec![ - b256!("1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1"))) - }, - Log { - address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(), - data: LogData::new_unchecked( vec![ - b256!("d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"), - b256!("00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"), - b256!("000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"), - ], - Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000"))) - }, - ], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - bloom: 
Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), - }, - // 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351 - ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 623249, - logs: vec![ - Log { - address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - data: LogData::new_unchecked( vec![ - b256!("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), - b256!("0000000000000000000000000000000000000000000000000000000000000000"), - b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), - b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), - ], - Default::default()) - }, - Log { - address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - data: LogData::new_unchecked( vec![ - b256!("9d89e36eadf856db0ad9ffb5a569e07f95634dddd9501141ecf04820484ad0dc"), - b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), - b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), - ], - Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) - }, - Log { - address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(), - data: LogData::new_unchecked( vec![ - b256!("110d160a1bedeea919a88fbc4b2a9fb61b7e664084391b6ca2740db66fef80fe"), - b256!("00000000000000000000000084d47f6eea8f8d87910448325519d1bb45c2972a"), - b256!("000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"), - b256!("000000000000000000000000000000000000000000000000000000000011a1d3"), - ], - Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000"))) - }, - ], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - bloom: 
Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), - }, - ]; - let root = calculate_receipt_root_optimism(&receipts, BASE_SEPOLIA.as_ref(), case.1); - assert_eq!(root, case.2); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn check_receipt_root_optimism() { - let logs = vec![Log { - address: Address::ZERO, - data: LogData::new_unchecked(vec![], Default::default()), - }]; - let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); - let receipt = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip2930, - success: true, - cumulative_gas_used: 102068, - logs, - deposit_nonce: None, - deposit_receipt_version: None, - }, - bloom, - }; - let receipt = vec![receipt]; - let root = calculate_receipt_root_optimism(&receipt, crate::BASE_SEPOLIA.as_ref(), 0); - assert_eq!(root, b256!("fe70ae4a136d98944951b2123859698d59ad251a381abc9960fa81cae3d0d4a0")); - } - #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 5bce92c3c0..967dde4694 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -4,6 +4,7 @@ use crate::{ logs_bloom, Bloom, Bytes, TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, }; +use alloc::{vec, vec::Vec}; use alloy_primitives::Log; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; @@ -13,9 +14,6 @@ use derive_more::{DerefMut, From, IntoIterator}; use reth_codecs::{Compact, CompactZstd}; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::{vec, vec::Vec}; - /// Receipt containing result of transaction execution. 
#[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, @@ -526,7 +524,7 @@ mod tests { fn encode_legacy_receipt() { let expected = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); - let mut data = vec![]; + let mut data = Vec::with_capacity(expected.length()); let receipt = ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Legacy, @@ -607,7 +605,7 @@ mod tests { let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); - let mut buf = Vec::new(); + let mut buf = Vec::with_capacity(data.len()); receipt.encode_inner(&mut buf, false); assert_eq!(buf, &data[..]); } @@ -633,7 +631,7 @@ mod tests { let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); - let mut buf = Vec::new(); + let mut buf = Vec::with_capacity(data.len()); expected.encode_inner(&mut buf, false); assert_eq!(buf, &data[..]); } diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 7f47aa7e66..f54e553725 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,9 +1,6 @@ use crate::{Address, Transaction, TransactionSigned, TxKind, U256}; use revm_primitives::{AuthorizationList, TxEnv}; -#[cfg(all(not(feature = "std"), feature = "optimism"))] -use alloc::vec::Vec; - /// Implements behaviour to fill a [`TxEnv`] from another transaction. pub trait FillTxEnv { /// Fills [`TxEnv`] with an [`Address`] and transaction. 
@@ -14,7 +11,7 @@ impl FillTxEnv for TransactionSigned { fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { #[cfg(feature = "optimism")] let envelope = { - let mut envelope = Vec::with_capacity(self.length_without_header()); + let mut envelope = alloc::vec::Vec::with_capacity(self.length_without_header()); self.encode_enveloped(&mut envelope); envelope }; @@ -22,7 +19,7 @@ impl FillTxEnv for TransactionSigned { tx_env.caller = sender; match self.as_ref() { Transaction::Legacy(tx) => { - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; @@ -36,7 +33,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip2930(tx) => { - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; @@ -50,7 +47,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip1559(tx) => { - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = tx.to; @@ -64,7 +61,7 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip4844(tx) => { - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = TxKind::Call(tx.to); @@ -78,10 +75,10 @@ impl FillTxEnv for TransactionSigned { tx_env.authorization_list = None; } Transaction::Eip7702(tx) => { - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = tx.to; + tx_env.transact_to = tx.to.into(); tx_env.value = tx.value; tx_env.data = tx.input.clone(); tx_env.chain_id = Some(tx.chain_id); @@ -95,7 +92,7 @@ impl FillTxEnv for TransactionSigned { #[cfg(feature = "optimism")] Transaction::Deposit(tx) => { tx_env.access_list.clear(); - tx_env.gas_limit = tx.gas_limit; + tx_env.gas_limit = tx.gas_limit as u64; tx_env.gas_price = U256::ZERO; tx_env.gas_priority_fee = None; tx_env.transact_to = tx.to; diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs deleted file mode 100644 index f95f84cc41..0000000000 --- a/crates/primitives/src/transaction/eip1559.rs +++ /dev/null @@ -1,284 +0,0 @@ -use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; -use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use core::mem; - -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use serde::{Deserialize, Serialize}; - -/// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). 
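The `fill_tx_env` hunk above narrows `gas_limit` with `as u64` in every arm: this diff suggests the transaction types now expose a wider gas-limit integer (u128 in the alloy types this revision appears to target), while revm's `TxEnv` keeps a `u64`. A minimal standalone sketch of that narrowing, using a hypothetical `MockTxEnv` stand-in rather than revm's actual `TxEnv`:

struct MockTxEnv {
    gas_limit: u64,
}

// `as u64` truncates silently; protocol-valid gas limits fit comfortably in a
// u64, so the cast is assumed lossless for any transaction that passed
// consensus checks.
fn fill_gas_limit(tx_env: &mut MockTxEnv, tx_gas_limit: u128) {
    tx_env.gas_limit = tx_gas_limit as u64;
}

fn main() {
    let mut env = MockTxEnv { gas_limit: 0 };
    fill_gas_limit(&mut env, 30_000_000);
    assert_eq!(env.gas_limit, 30_000_000);
}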
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxEip1559 { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: ChainId, - - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasFeeCap` - pub max_fee_per_gas: u128, - - /// Max Priority fee that transaction is paying - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasTipCap` - pub max_priority_fee_per_gas: u128, - - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TxKind, - - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - pub value: U256, - - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - - /// Input has two uses depending if the transaction `to` field is [`TxKind::Create`] or - /// [`TxKind::Call`]. - /// - /// Input as init code, or if `to` is [`TxKind::Create`]: An unlimited size byte array - /// specifying the EVM-code for the account initialisation procedure `CREATE` - /// - /// Input as data, or if `to` is [`TxKind::Call`]: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip1559 { - /// Returns the effective gas price for the given `base_fee`. 
- pub const fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { - match base_fee { - None => self.max_fee_per_gas, - Some(base_fee) => { - // if the tip is greater than the max priority fee per gas, set it to the max - // priority fee per gas + base fee - let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); - if tip > self.max_priority_fee_per_gas { - self.max_priority_fee_per_gas + base_fee as u128 - } else { - // otherwise return the max fee per gas - self.max_fee_per_gas - } - } - } - } - - /// Decodes the inner [`TxEip1559`] fields from RLP bytes. - /// - /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following - /// RLP fields in the following order: - /// - /// - `chain_id` - /// - `nonce` - /// - `max_priority_fee_per_gas` - /// - `max_fee_per_gas` - /// - `gas_limit` - /// - `to` - /// - `value` - /// - `data` (`input`) - /// - `access_list` - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { - Ok(Self { - chain_id: Decodable::decode(buf)?, - nonce: Decodable::decode(buf)?, - max_priority_fee_per_gas: Decodable::decode(buf)?, - max_fee_per_gas: Decodable::decode(buf)?, - gas_limit: Decodable::decode(buf)?, - to: Decodable::decode(buf)?, - value: Decodable::decode(buf)?, - input: Decodable::decode(buf)?, - access_list: Decodable::decode(buf)?, - }) - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub(crate) fn fields_len(&self) -> usize { - self.chain_id.length() + - self.nonce.length() + - self.max_priority_fee_per_gas.length() + - self.max_fee_per_gas.length() + - self.gas_limit.length() + - self.to.length() + - self.value.length() + - self.input.0.length() + - self.access_list.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.chain_id.encode(out); - self.nonce.encode(out); - self.max_priority_fee_per_gas.encode(out); - self.max_fee_per_gas.encode(out); - self.gas_limit.encode(out); - self.to.encode(out); - self.value.encode(out); - self.input.0.encode(out); - self.access_list.encode(out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - /// - /// This encodes the transaction as: - /// `rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit to, value, input, - /// access_list, y_parity, r, s)` - pub(crate) fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); - } - - /// Output the length of the RLP signed transaction encoding, _without_ a RLP string header. - pub(crate) fn payload_len_with_signature_without_header(&self, signature: &Signature) -> usize { - let payload_length = self.fields_len() + signature.payload_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header.
- pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { - let len = self.payload_len_with_signature_without_header(signature); - length_of_length(len) + len - } - - /// Get transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Eip1559 - } - - /// Calculates a heuristic for the in-memory size of the [`TxEip1559`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::<ChainId>() + // chain_id - mem::size_of::<u64>() + // nonce - mem::size_of::<u64>() + // gas_limit - mem::size_of::<u128>() + // max_fee_per_gas - mem::size_of::<u128>() + // max_priority_fee_per_gas - self.to.size() + // to - mem::size_of::<U256>() + // value - self.access_list.size() + // access_list - self.input.len() // input - } - - /// Encodes the EIP-1559 transaction in RLP for signing. - /// - /// This encodes the transaction as: - /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, - /// value, input, access_list)` - /// - /// Note that there is no rlp header before the transaction type byte. - pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - out.put_u8(self.tx_type() as u8); - Header { list: true, payload_length: self.fields_len() }.encode(out); - self.encode_fields(out); - } - - /// Outputs the length of the signature RLP encoding for the transaction. - pub(crate) fn payload_len_for_signature(&self) -> usize { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Outputs the signature hash of the transaction by first encoding without a signature, then - /// hashing. - pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = Vec::with_capacity(self.payload_len_for_signature()); - self.encode_for_signing(&mut buf); - keccak256(&buf) - } -} - -#[cfg(test)] -mod tests { - use super::TxEip1559; - use crate::{ - transaction::{signature::Signature, TxKind}, - AccessList, Address, Transaction, TransactionSigned, B256, U256, - }; - use std::str::FromStr; - - #[test] - fn recover_signer_eip1559() { - use crate::hex_literal::hex; - - let signer: Address = hex!("dd6b8b3dc6b7ad97db52f08a275ff4483e024cea").into(); - let hash: B256 = - hex!("0ec0b6a2df4d87424e5f6ad2a654e27aaeb7dac20ae9e8385cc09087ad532ee0").into(); - - let tx = Transaction::Eip1559( TxEip1559 { - chain_id: 1, - nonce: 0x42, - gas_limit: 44386, - to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), - value: U256::ZERO, - input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), - max_fee_per_gas: 0x4a817c800, - max_priority_fee_per_gas: 0x3b9aca00, - access_list: AccessList::default(), - }); - - let sig = Signature { - r: U256::from_str("0x840cfc572845f5786e702984c2a582528cad4b49b2a10b9db1be7fca90058565") - .unwrap(), - s: U256::from_str("0x25e7109ceb98168d95b09b18bbf6b685130e0562f233877d492b94eee0c5b6d1") - .unwrap(), - odd_y_parity: false, - }; - - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); - assert_eq!(signed_tx.hash(), hash, "Expected same hash"); - assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); - } -} diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs deleted file mode 100644 index 44bad8afb9..0000000000 --- a/crates/primitives/src/transaction/eip2930.rs +++ /dev/null @@ -1,262 +0,0 @@
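The `effective_gas_price` helper deleted above (the EIP-4844 and EIP-7702 copies further down are identical) implements the EIP-1559 fee rule: the signer ultimately pays min(max_fee_per_gas, base_fee + max_priority_fee_per_gas). A self-contained sketch of the same rule as a free function over plain integers, rather than reth's method on `TxEip1559`:

// Standalone restatement of the deleted `effective_gas_price` logic.
fn effective_gas_price(
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
    base_fee: Option<u64>,
) -> u128 {
    match base_fee {
        // No base fee known: fall back to the fee cap itself.
        None => max_fee_per_gas,
        Some(base_fee) => {
            // Tip left over once the base fee is paid out of the cap.
            let tip = max_fee_per_gas.saturating_sub(base_fee as u128);
            if tip > max_priority_fee_per_gas {
                // The cap leaves room for the full tip: pay base fee + tip cap.
                max_priority_fee_per_gas + base_fee as u128
            } else {
                // The cap binds: the whole max fee is consumed.
                max_fee_per_gas
            }
        }
    }
}

fn main() {
    // Cap 20, tip cap 1, base fee 10 -> pays 10 + 1 = 11.
    assert_eq!(effective_gas_price(20, 1, Some(10)), 11);
    // Cap 5 with base fee 10 -> the cap binds and 5 is paid.
    assert_eq!(effective_gas_price(5, 1, Some(10)), 5);
}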
-use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; -use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use core::mem; - -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use serde::{Deserialize, Serialize}; - -/// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxEip2930 { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: ChainId, - - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - - /// A scalar value equal to the number of - /// Wei to be paid per unit of gas for all computation - /// costs incurred as a result of the execution of this transaction; formally Tp. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub gas_price: u128, - - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TxKind, - - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - pub value: U256, - - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - - /// Input has two uses depending if the transaction `to` field is [`TxKind::Create`] or - /// [`TxKind::Call`]. - /// - /// Input as init code, or if `to` is [`TxKind::Create`]: An unlimited size byte array - /// specifying the EVM-code for the account initialisation procedure `CREATE` - /// - /// Input as data, or if `to` is [`TxKind::Call`]: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip2930 { - /// Calculates a heuristic for the in-memory size of the [`TxEip2930`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::<ChainId>() + // chain_id - mem::size_of::<u64>() + // nonce - mem::size_of::<u128>() + // gas_price - mem::size_of::<u64>() + // gas_limit - self.to.size() + // to - mem::size_of::<U256>() + // value - self.access_list.size() + // access_list - self.input.len() // input - } - - /// Decodes the inner [`TxEip2930`] fields from RLP bytes.
- /// - /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following - /// RLP fields in the following order: - /// - /// - `chain_id` - /// - `nonce` - /// - `gas_price` - /// - `gas_limit` - /// - `to` - /// - `value` - /// - `data` (`input`) - /// - `access_list` - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { - Ok(Self { - chain_id: Decodable::decode(buf)?, - nonce: Decodable::decode(buf)?, - gas_price: Decodable::decode(buf)?, - gas_limit: Decodable::decode(buf)?, - to: Decodable::decode(buf)?, - value: Decodable::decode(buf)?, - input: Decodable::decode(buf)?, - access_list: Decodable::decode(buf)?, - }) - } - - /// Outputs the length of the transaction's fields, without a RLP header. - pub(crate) fn fields_len(&self) -> usize { - self.chain_id.length() + - self.nonce.length() + - self.gas_price.length() + - self.gas_limit.length() + - self.to.length() + - self.value.length() + - self.input.0.length() + - self.access_list.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.chain_id.encode(out); - self.nonce.encode(out); - self.gas_price.encode(out); - self.gas_limit.encode(out); - self.to.encode(out); - self.value.encode(out); - self.input.0.encode(out); - self.access_list.encode(out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - /// - /// This encodes the transaction as: - /// `rlp(nonce, gas_price, gas_limit, to, value, input, access_list, y_parity, r, s)` - pub(crate) fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); - } - - /// Output the length of the RLP signed transaction encoding, _without_ a RLP string header. - pub(crate) fn payload_len_with_signature_without_header(&self, signature: &Signature) -> usize { - let payload_length = self.fields_len() + signature.payload_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. - pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { - let len = self.payload_len_with_signature_without_header(signature); - length_of_length(len) + len - } - - /// Get transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Eip2930 - } - - /// Encodes the EIP-2930 transaction in RLP for signing. - /// - /// This encodes the transaction as: - /// `tx_type || rlp(chain_id, nonce, gas_price, gas_limit, to, value, input, access_list)` - /// - /// Note that there is no rlp header before the transaction type byte. - pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - out.put_u8(self.tx_type() as u8); - Header { list: true, payload_length: self.fields_len() }.encode(out); - self.encode_fields(out); - } - - /// Outputs the length of the signature RLP encoding for the transaction.
- pub(crate) fn payload_len_for_signature(&self) -> usize { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Outputs the signature hash of the transaction by first encoding without a signature, then - /// hashing. - pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = Vec::with_capacity(self.payload_len_for_signature()); - self.encode_for_signing(&mut buf); - keccak256(&buf) - } -} - -#[cfg(test)] -mod tests { - use super::TxEip2930; - use crate::{ - transaction::{signature::Signature, TxKind}, - Address, Bytes, Transaction, TransactionSigned, U256, - }; - use alloy_rlp::{Decodable, Encodable}; - - #[test] - fn test_decode_create() { - // tests that a contract creation tx encodes and decodes properly - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce: 0, - gas_price: 1, - gas_limit: 2, - to: TxKind::Create, - value: U256::from(3), - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = Vec::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } - - #[test] - fn test_decode_call() { - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce: 0, - gas_price: 1, - gas_limit: 2, - to: Address::default().into(), - value: U256::from(3), - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = Vec::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } -} diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs deleted file mode 100644 index a3bad68805..0000000000 --- a/crates/primitives/src/transaction/eip4844.rs +++ /dev/null @@ -1,324 +0,0 @@ -use super::access_list::AccessList; -use crate::{ - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Address, Bytes, ChainId, Signature, TxType, - B256, U256, -}; -use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use core::mem; - -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; - -/// To be used with `Option` to place or replace one bit on the bitflag struct. 
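Each deleted `encode_with_signature`/`encode_for_signing` pair above (and in the remaining typed-transaction files below) produces an EIP-2718 envelope: one transaction-type byte followed by an RLP list of the fields, plus the signature values in the signed form. A sketch of that byte layout, assuming the `alloy_rlp` crate's `Header`/`Encodable` API and stand-in `u64` fields rather than a real transaction's field set:

use alloy_rlp::Encodable;

// Hypothetical helper illustrating the envelope shape only.
fn encode_typed_envelope(tx_type: u8, fields: &[u64]) -> Vec<u8> {
    let mut out = Vec::new();
    // 1. the raw transaction type byte, outside any RLP structure
    out.push(tx_type);
    // 2. an RLP list header sized to the encoded fields...
    let payload_length: usize = fields.iter().map(|f| f.length()).sum();
    alloy_rlp::Header { list: true, payload_length }.encode(&mut out);
    // 3. ...followed by each field's RLP encoding
    for field in fields {
        field.encode(&mut out);
    }
    out
}

fn main() {
    // 0x02 marks an EIP-1559 payload; everything after it is one RLP list.
    let envelope = encode_typed_envelope(0x02, &[1, 0x42, 100]);
    assert_eq!(envelope[0], 0x02);
}

The optional outer string header (`with_header` in the deleted code) only matters when the envelope is nested inside another RLP structure such as a block body; hashing uses the headerless form, which is why the `*_without_header` length helpers exist.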
-pub(crate) type CompactPlaceholder = (); - -#[cfg(feature = "c-kzg")] -use crate::kzg::KzgSettings; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use serde::{Deserialize, Serialize}; - -/// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) -/// -/// A transaction with blob hashes and max blob fee -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxEip4844 { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: ChainId, - - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasFeeCap` - pub max_fee_per_gas: u128, - - /// Max Priority fee that transaction is paying - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasTipCap` - pub max_priority_fee_per_gas: u128, - - /// TODO(debt): this should be removed if we break the DB. - /// Makes sure that the Compact bitflag struct has one bit after the above field: - /// - pub placeholder: Option<CompactPlaceholder>, - - /// The 160-bit address of the message call’s recipient. - pub to: Address, - - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - pub value: U256, - - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - - /// It contains a vector of fixed size hash(32 bytes) - pub blob_versioned_hashes: Vec<B256>, - - /// Max fee per data gas - /// - /// aka BlobFeeCap or blobGasFeeCap - pub max_fee_per_blob_gas: u128, - - /// Unlike other transaction types, where the `input` field has two uses depending on whether - /// or not the `to` field is [`Create`](crate::TxKind::Create) or - /// [`Call`](crate::TxKind::Call), EIP-4844 transactions cannot be - /// [`Create`](crate::TxKind::Create) transactions. - /// - /// This means the `input` field has a single use, as data: An unlimited size byte array - /// specifying the input data of the message call, formally Td.
- pub input: Bytes, -} - -impl TxEip4844 { - /// Returns the effective gas price for the given `base_fee`. - pub const fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { - match base_fee { - None => self.max_fee_per_gas, - Some(base_fee) => { - // if the tip is greater than the max priority fee per gas, set it to the max - // priority fee per gas + base fee - let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); - if tip > self.max_priority_fee_per_gas { - self.max_priority_fee_per_gas + base_fee as u128 - } else { - // otherwise return the max fee per gas - self.max_fee_per_gas - } - } - } - } - - /// Verifies that the given blob data, commitments, and proofs are all valid for this - /// transaction. - /// - /// Takes as input the [`KzgSettings`], which should contain the parameters derived from the - /// KZG trusted setup. - /// - /// This ensures that the blob transaction payload has the same number of blob data elements, - /// commitments, and proofs. Each blob data element is verified against its commitment and - /// proof. - /// - /// Returns `InvalidProof` if any blob KZG proof in the response - /// fails to verify, or if the versioned hashes in the transaction do not match the actual - /// commitment versioned hashes. - #[cfg(feature = "c-kzg")] - pub fn validate_blob( - &self, - sidecar: &crate::BlobTransactionSidecar, - proof_settings: &KzgSettings, - ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - sidecar.validate(&self.blob_versioned_hashes, proof_settings) - } - - /// Returns the total gas for all blobs in this transaction. - #[inline] - pub fn blob_gas(&self) -> u64 { - // NOTE: we don't expect u64::MAX / DATA_GAS_PER_BLOB hashes in a single transaction - self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB - } - - /// Decodes the inner [`TxEip4844`] fields from RLP bytes. - /// - /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following - /// RLP fields in the following order: - /// - /// - `chain_id` - /// - `nonce` - /// - `max_priority_fee_per_gas` - /// - `max_fee_per_gas` - /// - `gas_limit` - /// - `to` - /// - `value` - /// - `data` (`input`) - /// - `access_list` - /// - `max_fee_per_blob_gas` - /// - `blob_versioned_hashes` - pub fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { - let mut tx = Self { - chain_id: Decodable::decode(buf)?, - nonce: Decodable::decode(buf)?, - max_priority_fee_per_gas: Decodable::decode(buf)?, - max_fee_per_gas: Decodable::decode(buf)?, - gas_limit: Decodable::decode(buf)?, - placeholder: None, - to: Decodable::decode(buf)?, - value: Decodable::decode(buf)?, - input: Decodable::decode(buf)?, - access_list: Decodable::decode(buf)?, - max_fee_per_blob_gas: Decodable::decode(buf)?, - blob_versioned_hashes: Decodable::decode(buf)?, - }; - - // HACK: our arbitrary implementation sets the placeholder this way for backwards - // compatibility, and should be removed once `placeholder` can be removed - if tx.to != Address::default() { - tx.placeholder = Some(()) - } - - Ok(tx) - } - - /// Outputs the length of the transaction's fields, without a RLP header.
- pub(crate) fn fields_len(&self) -> usize { - self.chain_id.length() + - self.nonce.length() + - self.gas_limit.length() + - self.max_fee_per_gas.length() + - self.max_priority_fee_per_gas.length() + - self.to.length() + - self.value.length() + - self.access_list.length() + - self.blob_versioned_hashes.length() + - self.max_fee_per_blob_gas.length() + - self.input.0.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.chain_id.encode(out); - self.nonce.encode(out); - self.max_priority_fee_per_gas.encode(out); - self.max_fee_per_gas.encode(out); - self.gas_limit.encode(out); - self.to.encode(out); - self.value.encode(out); - self.input.0.encode(out); - self.access_list.encode(out); - self.max_fee_per_blob_gas.encode(out); - self.blob_versioned_hashes.encode(out); - } - - /// Calculates a heuristic for the in-memory size of the [`TxEip4844`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::<ChainId>() + // chain_id - mem::size_of::<u64>() + // nonce - mem::size_of::<u64>() + // gas_limit - mem::size_of::<u128>() + // max_fee_per_gas - mem::size_of::<u128>() + // max_priority_fee_per_gas - mem::size_of::<Address>() + // to - mem::size_of::<U256>() + // value - self.access_list.size() + // access_list - self.input.len() + // input - self.blob_versioned_hashes.capacity() * mem::size_of::<B256>() + // blob hashes size - mem::size_of::<u128>() // max_fee_per_data_gas - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub(crate) fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); - } - - /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. - pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { - let len = self.payload_len_with_signature_without_header(signature); - length_of_length(len) + len - } - - /// Output the length of the RLP signed transaction encoding, _without_ a RLP header. - pub(crate) fn payload_len_with_signature_without_header(&self, signature: &Signature) -> usize { - let payload_length = self.fields_len() + signature.payload_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Get transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Eip4844 - } - - /// Encodes the EIP-4844 transaction in RLP for signing. - /// - /// This encodes the transaction as: - /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, - /// value, input, access_list, max_fee_per_blob_gas, blob_versioned_hashes)` - /// - /// Note that there is no rlp header before the transaction type byte. - pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - out.put_u8(self.tx_type() as u8); - Header { list: true, payload_length: self.fields_len() }.encode(out); - self.encode_fields(out); - } - - /// Outputs the length of the signature RLP encoding for the transaction. - pub(crate) fn payload_len_for_signature(&self) -> usize { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Outputs the signature hash of the transaction by first encoding without a signature, then - /// hashing.
- pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = Vec::with_capacity(self.payload_len_for_signature()); - self.encode_for_signing(&mut buf); - keccak256(&buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::{address, bytes}; - - #[test] - fn backwards_compatible_txkind_test() { - // TxEip4844 encoded with TxKind on to field - // holesky tx hash: <0xa3b1668225bf0fbfdd6c19aa6fd071fa4ff5d09a607c67ccd458b97735f745ac> - let tx = bytes!("224348a100426844cb2dc6c0b2d05e003b9aca0079c9109b764609df928d16fc4a91e9081f7e87db09310001019101fb28118ceccaabca22a47e35b9c3f12eb2dcb25e5c543d5b75e6cd841f0a05328d26ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - let (tx, _) = TxEip4844::from_compact(&tx, tx.len()); - assert_eq!(tx.to, address!("79C9109b764609df928d16fC4a91e9081F7e87DB")); - assert_eq!(tx.placeholder, Some(())); - assert_eq!(tx.input, 
bytes!("ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")); - } -} diff --git a/crates/primitives/src/transaction/eip7702.rs b/crates/primitives/src/transaction/eip7702.rs deleted file mode 100644 index 0445a1eac3..0000000000 --- a/crates/primitives/src/transaction/eip7702.rs +++ /dev/null @@ -1,311 +0,0 @@ -use super::access_list::AccessList; -use crate::{ - eip7702::SignedAuthorization, keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256, -}; -use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use core::mem; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use serde::{Deserialize, Serialize}; - -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; - -/// [EIP-7702 Set Code Transaction](https://eips.ethereum.org/EIPS/eip-7702) -/// -/// Set EOA account code for one transaction -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, 
Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxEip7702 { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: ChainId, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the number of - /// Wei to be paid per unit of gas for all computation - /// costs incurred as a result of the execution of this transaction; formally Tp. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub gas_limit: u64, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasFeeCap` - pub max_fee_per_gas: u128, - /// Max Priority fee that transaction is paying - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasTipCap` - pub max_priority_fee_per_gas: u128, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TxKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - pub value: U256, - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - /// Authorizations are used to temporarily set the code of its signer to - /// the code referenced by `address`. These also include a `chain_id` (which - /// can be set to zero and not evaluated) as well as an optional `nonce`. - pub authorization_list: Vec<SignedAuthorization>, - /// Input has two uses depending if the transaction `to` field is [`TxKind::Create`] or - /// [`TxKind::Call`]. - /// - /// Input as init code, or if `to` is [`TxKind::Create`]: An unlimited size byte array - /// specifying the EVM-code for the account initialisation procedure `CREATE` - /// - /// Input as data, or if `to` is [`TxKind::Call`]: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip7702 { - /// Returns the effective gas price for the given `base_fee`.
- pub const fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { - match base_fee { - None => self.max_fee_per_gas, - Some(base_fee) => { - // if the tip is greater than the max priority fee per gas, set it to the max - // priority fee per gas + base fee - let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); - if tip > self.max_priority_fee_per_gas { - self.max_priority_fee_per_gas + base_fee as u128 - } else { - // otherwise return the max fee per gas - self.max_fee_per_gas - } - } - } - } - - /// Calculates a heuristic for the in-memory size of the [`TxEip7702`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::<ChainId>() + // chain_id - mem::size_of::<u64>() + // nonce - mem::size_of::<u128>() + // gas_price - mem::size_of::<u64>() + // gas_limit - self.to.size() + // to - mem::size_of::<U256>() + // value - self.access_list.size() + // access_list - mem::size_of::<SignedAuthorization>() - * self.authorization_list.capacity() + // authorization_list - self.input.len() // input - } - - /// Decodes the inner [`TxEip7702`] fields from RLP bytes. - /// - /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following - /// RLP fields in the following order: - /// - /// - `chain_id` - /// - `nonce` - /// - `gas_price` - /// - `gas_limit` - /// - `to` - /// - `value` - /// - `data` (`input`) - /// - `access_list` - /// - `authorization_list` - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { - Ok(Self { - chain_id: Decodable::decode(buf)?, - nonce: Decodable::decode(buf)?, - max_priority_fee_per_gas: Decodable::decode(buf)?, - max_fee_per_gas: Decodable::decode(buf)?, - gas_limit: Decodable::decode(buf)?, - to: Decodable::decode(buf)?, - value: Decodable::decode(buf)?, - input: Decodable::decode(buf)?, - access_list: Decodable::decode(buf)?, - authorization_list: Decodable::decode(buf)?, - }) - } - - /// Outputs the length of the transaction's fields, without a RLP header. - pub(crate) fn fields_len(&self) -> usize { - self.chain_id.length() + - self.nonce.length() + - self.max_priority_fee_per_gas.length() + - self.max_fee_per_gas.length() + - self.gas_limit.length() + - self.to.length() + - self.value.length() + - self.input.0.length() + - self.access_list.length() + - self.authorization_list.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header.
- pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.chain_id.encode(out); - self.nonce.encode(out); - self.max_priority_fee_per_gas.encode(out); - self.max_fee_per_gas.encode(out); - self.gas_limit.encode(out); - self.to.encode(out); - self.value.encode(out); - self.input.0.encode(out); - self.access_list.encode(out); - self.authorization_list.encode(out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - /// - /// This encodes the transaction as: - /// `rlp([chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, destination, - /// value, data, access_list, authorization_list, signature_y_parity, signature_r, - /// signature_s])` - pub(crate) fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); - } - - /// Output the length of the RLP signed transaction encoding, _without_ a RLP string header. - pub(crate) fn payload_len_with_signature_without_header(&self, signature: &Signature) -> usize { - let payload_length = self.fields_len() + signature.payload_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. - pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { - let len = self.payload_len_with_signature_without_header(signature); - length_of_length(len) + len - } - - /// Get transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Eip7702 - } - - /// Encodes the EIP-7702 transaction in RLP for signing. - /// - /// This encodes the transaction as: - /// `tx_type || rlp(chain_id, nonce, gas_price, gas_limit, to, value, input, access_list, - /// authorization_list)` - /// - /// Note that there is no rlp header before the transaction type byte. - pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - out.put_u8(self.tx_type() as u8); - Header { list: true, payload_length: self.fields_len() }.encode(out); - self.encode_fields(out); - } - - /// Outputs the length of the signature RLP encoding for the transaction. - pub(crate) fn payload_len_for_signature(&self) -> usize { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Outputs the signature hash of the transaction by first encoding without a signature, then - /// hashing. 
- pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = Vec::with_capacity(self.payload_len_for_signature()); - self.encode_for_signing(&mut buf); - keccak256(&buf) - } -} - -#[cfg(test)] -mod tests { - use super::TxEip7702; - use crate::{ - transaction::{signature::Signature, TxKind}, - Address, Bytes, Transaction, TransactionSigned, U256, - }; - use alloy_rlp::{Decodable, Encodable}; - - #[test] - fn test_decode_create() { - // tests that a contract creation tx encodes and decodes properly - let request = Transaction::Eip7702(TxEip7702 { - chain_id: 1u64, - nonce: 0, - max_fee_per_gas: 0x4a817c800, - max_priority_fee_per_gas: 0x3b9aca00, - gas_limit: 2, - to: TxKind::Create, - value: U256::ZERO, - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - authorization_list: Default::default(), - }); - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = Vec::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } - - #[test] - fn test_decode_call() { - let request = Transaction::Eip7702(TxEip7702 { - chain_id: 1u64, - nonce: 0, - max_fee_per_gas: 0x4a817c800, - max_priority_fee_per_gas: 0x3b9aca00, - gas_limit: 2, - to: Address::default().into(), - value: U256::ZERO, - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - authorization_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = Vec::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } -} diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index 2f3ffc5f8b..de4efa4d8f 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -12,8 +12,13 @@ pub enum InvalidTransactionError { /// The nonce is lower than the account's nonce, or there is a nonce gap present. /// /// This is a consensus error. - #[display("transaction nonce is not consistent")] - NonceNotConsistent, + #[display("transaction nonce is not consistent: next nonce {state}, tx nonce {tx}")] + NonceNotConsistent { + /// The nonce of the transaction. + tx: u64, + /// The current state of the nonce in the local chain. + state: u64, + }, /// The transaction is before Spurious Dragon and has a chain ID. 
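// Hypothetical caller-side sketch of the enriched variant introduced above:
// both nonces now travel with the error, so reporting a nonce gap needs no
// extra state lookups (the helper name here is an assumption).
fn explain_nonce_gap(err: &InvalidTransactionError) -> Option<String> {
    match err {
        InvalidTransactionError::NonceNotConsistent { tx, state } => {
            Some(format!("next expected nonce is {state}, transaction carries {tx}"))
        }
        _ => None,
    }
}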
#[display("transactions before Spurious Dragon should not have a chain ID")] OldLegacyChainId, @@ -61,7 +66,7 @@ impl std::error::Error for InvalidTransactionError {} /// Represents error variants that can happen when trying to convert a transaction to /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) -#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] +#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display, derive_more::Error)] pub enum TransactionConversionError { /// This error variant is used when a transaction cannot be converted into a /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) because it is not supported @@ -70,9 +75,6 @@ pub enum TransactionConversionError { UnsupportedForP2P, } -#[cfg(feature = "std")] -impl std::error::Error for TransactionConversionError {} - /// Represents error variants than can happen when trying to convert a /// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs deleted file mode 100644 index 181c543803..0000000000 --- a/crates/primitives/src/transaction/legacy.rs +++ /dev/null @@ -1,219 +0,0 @@ -use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; -use alloy_rlp::{length_of_length, Encodable, Header}; -use core::mem; - -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use serde::{Deserialize, Serialize}; - -/// Legacy transaction. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxLegacy { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: Option, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the number of - /// Wei to be paid per unit of gas for all computation - /// costs incurred as a result of the execution of this transaction; formally Tp. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub gas_price: u128, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TxKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - pub value: U256, - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). 
pub init: An unlimited size byte array specifying the - /// EVM-code for the account initialisation procedure CREATE, - /// data: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxLegacy { - /// Calculates a heuristic for the in-memory size of the [`TxLegacy`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::>() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_price - mem::size_of::() + // gas_limit - self.to.size() + // to - mem::size_of::() + // value - self.input.len() // input - } - - /// Outputs the length of the transaction's fields, without a RLP header or length of the - /// eip155 fields. - pub(crate) fn fields_len(&self) -> usize { - self.nonce.length() + - self.gas_price.length() + - self.gas_limit.length() + - self.to.length() + - self.value.length() + - self.input.0.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header or - /// eip155 fields. - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.nonce.encode(out); - self.gas_price.encode(out); - self.gas_limit.encode(out); - self.to.encode(out); - self.value.encode(out); - self.input.0.encode(out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash. - /// - /// This encodes the transaction as: - /// `rlp(nonce, gas_price, gas_limit, to, value, input, v, r, s)` - /// - /// The `v` value is encoded according to EIP-155 if the `chain_id` is not `None`. - pub(crate) fn encode_with_signature(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { - let payload_length = - self.fields_len() + signature.payload_len_with_eip155_chain_id(self.chain_id); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode_with_eip155_chain_id(out, self.chain_id); - } - - /// Output the length of the RLP signed transaction encoding. - pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { - let payload_length = - self.fields_len() + signature.payload_len_with_eip155_chain_id(self.chain_id); - // 'header length' + 'payload length' - length_of_length(payload_length) + payload_length - } - - /// Get transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Legacy - } - - /// Encodes EIP-155 arguments into the desired buffer. Only encodes values for legacy - /// transactions. - /// - /// If a `chain_id` is `Some`, this encodes the `chain_id`, followed by two zeroes, as defined - /// by [EIP-155](https://eips.ethereum.org/EIPS/eip-155). - pub(crate) fn encode_eip155_fields(&self, out: &mut dyn bytes::BufMut) { - // if this is a legacy transaction without a chain ID, it must be pre-EIP-155 - // and does not need to encode the chain ID for the signature hash encoding - if let Some(id) = self.chain_id { - // EIP-155 encodes the chain ID and two zeroes - id.encode(out); - 0x00u8.encode(out); - 0x00u8.encode(out); - } - } - - /// Outputs the length of EIP-155 fields. Only outputs a non-zero value for EIP-155 legacy - /// transactions. 
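// A worked sketch of the EIP-155 scheme handled above (standalone helper,
// assumed name): a replay-protected legacy signing payload appends
// `chain_id, 0, 0`, and the signature's v value becomes `{0,1} + chain_id * 2 + 35`.
fn eip155_v(odd_y_parity: bool, chain_id: Option<u64>) -> u64 {
    match chain_id {
        Some(id) => odd_y_parity as u64 + id * 2 + 35, // e.g. mainnet, even parity -> 37
        None => odd_y_parity as u64 + 27,              // pre-EIP-155 legacy v
    }
}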
- pub(crate) fn eip155_fields_len(&self) -> usize { - if let Some(id) = self.chain_id { - // EIP-155 encodes the chain ID and two zeroes, so we add 2 to the length of the chain - // ID to get the length of all 3 fields - // len(chain_id) + (0x00) + (0x00) - id.length() + 2 - } else { - // this is either a pre-EIP-155 legacy transaction or a typed transaction - 0 - } - } - - /// Encodes the legacy transaction in RLP for signing, including the EIP-155 fields if possible. - /// - /// If a `chain_id` is `Some`, this encodes the transaction as: - /// `rlp(nonce, gas_price, gas_limit, to, value, input, chain_id, 0, 0)` - /// - /// Otherwise, this encodes the transaction as: - /// `rlp(nonce, gas_price, gas_limit, to, value, input)` - pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - Header { list: true, payload_length: self.fields_len() + self.eip155_fields_len() } - .encode(out); - self.encode_fields(out); - self.encode_eip155_fields(out); - } - - /// Outputs the length of the signature RLP encoding for the transaction, including the length - /// of the EIP-155 fields if possible. - pub(crate) fn payload_len_for_signature(&self) -> usize { - let payload_length = self.fields_len() + self.eip155_fields_len(); - // 'header length' + 'payload length' - length_of_length(payload_length) + payload_length - } - - /// Outputs the signature hash of the transaction by first encoding without a signature, then - /// hashing. - /// - /// See [`Self::encode_for_signing`] for more information on the encoding format. - pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = Vec::with_capacity(self.payload_len_for_signature()); - self.encode_for_signing(&mut buf); - keccak256(&buf) - } -} - -#[cfg(test)] -mod tests { - use super::TxLegacy; - use crate::{ - transaction::{signature::Signature, TxKind}, - Address, Transaction, TransactionSigned, B256, U256, - }; - - #[test] - fn recover_signer_legacy() { - use crate::hex_literal::hex; - - let signer: Address = hex!("398137383b3d25c92898c656696e41950e47316b").into(); - let hash: B256 = - hex!("bb3a336e3f823ec18197f1e13ee875700f08f03e2cab75f0d0b118dabb44cba0").into(); - - let tx = Transaction::Legacy(TxLegacy { - chain_id: Some(1), - nonce: 0x18, - gas_price: 0xfa56ea00, - gas_limit: 119902, - to: TxKind::Call(hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), - value: U256::from(0x1c6bf526340000u64), - input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), - }); - - let sig = Signature { - r: U256::from_be_bytes(hex!( - "2a378831cf81d99a3f06a18ae1b6ca366817ab4d88a70053c41d7a8f0368e031" - )), - s: U256::from_be_bytes(hex!( - "450d831a05b6e418724436c05c155e0a1b7b921015d0fbc2f667aed709ac4fb5" - )), - odd_y_parity: false, - }; - - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); - assert_eq!(signed_tx.hash(), hash, "Expected same hash"); - assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); - } -} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c1bdcc9648..1d645302a5 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -5,6 +5,7 @@ use crate::{ B256, U256, }; +use alloy_consensus::SignableTransaction; use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -16,15 +17,12 @@ use rayon::prelude::{IntoParallelIterator, 
ParallelIterator}; use serde::{Deserialize, Serialize}; pub use access_list::{AccessList, AccessListItem, AccessListResult}; -pub use eip1559::TxEip1559; -pub use eip2930::TxEip2930; -pub use eip4844::TxEip4844; -pub use eip7702::TxEip7702; + +pub use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; -pub use legacy::TxLegacy; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] @@ -43,12 +41,7 @@ pub use variant::TransactionSignedVariant; pub(crate) mod access_list; mod compat; -mod eip1559; -mod eip2930; -mod eip4844; -mod eip7702; mod error; -mod legacy; mod meta; mod pooled; mod sidecar; @@ -58,17 +51,18 @@ pub(crate) mod util; mod variant; #[cfg(feature = "optimism")] -mod optimism; - -#[cfg(feature = "optimism")] -pub use optimism::TxDeposit; +pub use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; +#[cfg(any(test, feature = "reth-codec"))] +use tx_type::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; #[cfg(test)] use reth_codecs::Compact; -#[cfg(not(feature = "std"))] use alloc::vec::Vec; /// Either a transaction hash or number. @@ -86,8 +80,7 @@ pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: Lazy = /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, derive_more::From)] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub enum Transaction { /// Legacy transaction (type `0x0`). @@ -140,6 +133,46 @@ pub enum Transaction { Deposit(TxDeposit), } +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for Transaction { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(match TxType::arbitrary(u)? { + TxType::Legacy => { + let mut tx = TxLegacy::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Legacy(tx) + } + TxType::Eip2930 => { + let mut tx = TxEip2930::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Eip2930(tx) + } + TxType::Eip1559 => { + let mut tx = TxEip1559::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Eip1559(tx) + } + TxType::Eip4844 => { + let mut tx = TxEip4844::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Eip4844(tx) + } + + TxType::Eip7702 => { + let mut tx = TxEip7702::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Eip7702(tx) + } + #[cfg(feature = "optimism")] + TxType::Deposit => { + let mut tx = TxDeposit::arbitrary(u)?; + tx.gas_limit = (tx.gas_limit as u64).into(); + Self::Deposit(tx) + } + }) + } +} + // === impl Transaction === impl Transaction { @@ -189,9 +222,10 @@ impl Transaction { match self { Self::Legacy(TxLegacy { to, .. }) | Self::Eip2930(TxEip2930 { to, .. }) | - Self::Eip1559(TxEip1559 { to, .. }) | - Self::Eip7702(TxEip7702 { to, .. }) => *to, - Self::Eip4844(TxEip4844 { to, .. }) => TxKind::Call(*to), + Self::Eip1559(TxEip1559 { to, .. }) => *to, + Self::Eip4844(TxEip4844 { to, .. 
}) | Self::Eip7702(TxEip7702 { to, .. }) => { + TxKind::Call(*to) + } #[cfg(feature = "optimism")] Self::Deposit(TxDeposit { to, .. }) => *to, } @@ -208,13 +242,13 @@ impl Transaction { /// Get the transaction's type pub const fn tx_type(&self) -> TxType { match self { - Self::Legacy(legacy_tx) => legacy_tx.tx_type(), - Self::Eip2930(access_list_tx) => access_list_tx.tx_type(), - Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.tx_type(), - Self::Eip4844(blob_tx) => blob_tx.tx_type(), - Self::Eip7702(set_code_tx) => set_code_tx.tx_type(), + Self::Legacy(_) => TxType::Legacy, + Self::Eip2930(_) => TxType::Eip2930, + Self::Eip1559(_) => TxType::Eip1559, + Self::Eip4844(_) => TxType::Eip4844, + Self::Eip7702(_) => TxType::Eip7702, #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.tx_type(), + Self::Deposit(_) => TxType::Deposit, } } @@ -274,12 +308,12 @@ impl Transaction { pub const fn gas_limit(&self) -> u64 { match self { Self::Legacy(TxLegacy { gas_limit, .. }) | - Self::Eip2930(TxEip2930 { gas_limit, .. }) | Self::Eip1559(TxEip1559 { gas_limit, .. }) | Self::Eip4844(TxEip4844 { gas_limit, .. }) | - Self::Eip7702(TxEip7702 { gas_limit, .. }) => *gas_limit, + Self::Eip7702(TxEip7702 { gas_limit, .. }) | + Self::Eip2930(TxEip2930 { gas_limit, .. }) => *gas_limit as u64, #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit, + Self::Deposit(TxDeposit { gas_limit, .. }) => *gas_limit as u64, } } @@ -494,33 +528,46 @@ impl Transaction { match self { Self::Legacy(legacy_tx) => { // do nothing w/ with_header - legacy_tx.encode_with_signature(signature, out) - } - Self::Eip2930(access_list_tx) => { - access_list_tx.encode_with_signature(signature, out, with_header) - } - Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_with_signature(signature, out, with_header) - } - Self::Eip4844(blob_tx) => blob_tx.encode_with_signature(signature, out, with_header), - Self::Eip7702(set_code_tx) => { - set_code_tx.encode_with_signature(signature, out, with_header) + legacy_tx.encode_with_signature_fields( + &signature.as_signature_with_eip155_parity(legacy_tx.chain_id), + out, + ) } + Self::Eip2930(access_list_tx) => access_list_tx.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + with_header, + ), + Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + with_header, + ), + Self::Eip4844(blob_tx) => blob_tx.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + with_header, + ), + Self::Eip7702(set_code_tx) => set_code_tx.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + with_header, + ), #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encode(out, with_header), + Self::Deposit(deposit_tx) => deposit_tx.encode_inner(out, with_header), } } /// This sets the transaction's gas limit. 
pub fn set_gas_limit(&mut self, gas_limit: u64) { match self { - Self::Legacy(tx) => tx.gas_limit = gas_limit, - Self::Eip2930(tx) => tx.gas_limit = gas_limit, - Self::Eip1559(tx) => tx.gas_limit = gas_limit, - Self::Eip4844(tx) => tx.gas_limit = gas_limit, - Self::Eip7702(tx) => tx.gas_limit = gas_limit, + Self::Legacy(tx) => tx.gas_limit = gas_limit.into(), + Self::Eip2930(tx) => tx.gas_limit = gas_limit.into(), + Self::Eip1559(tx) => tx.gas_limit = gas_limit.into(), + Self::Eip4844(tx) => tx.gas_limit = gas_limit.into(), + Self::Eip7702(tx) => tx.gas_limit = gas_limit.into(), #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.gas_limit = gas_limit, + Self::Deposit(tx) => tx.gas_limit = gas_limit.into(), } } @@ -648,36 +695,6 @@ impl Transaction { } } -impl From<TxLegacy> for Transaction { - fn from(tx: TxLegacy) -> Self { - Self::Legacy(tx) - } -} - -impl From<TxEip2930> for Transaction { - fn from(tx: TxEip2930) -> Self { - Self::Eip2930(tx) - } -} - -impl From<TxEip1559> for Transaction { - fn from(tx: TxEip1559) -> Self { - Self::Eip1559(tx) - } -} - -impl From<TxEip4844> for Transaction { - fn from(tx: TxEip4844) -> Self { - Self::Eip4844(tx) - } -} - -impl From<TxEip7702> for Transaction { - fn from(tx: TxEip7702) -> Self { - Self::Eip7702(tx) - } -} - #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an @@ -712,45 +729,45 @@ impl reth_codecs::Compact for Transaction { } // For backwards compatibility purposes, only 2 bits of the type are encoded in the identifier - // parameter. In the case of a 3, the full transaction type is read from the buffer as a - // single byte. + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. // // # Panics // // A panic will be triggered if an identifier larger than 3 is passed from the database. For - // optimism an identifier with value 126 is allowed. + // optimism an identifier with value [`DEPOSIT_TX_TYPE_ID`] is allowed. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { match identifier { - 0 => { + COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); (Self::Legacy(tx), buf) } - 1 => { + COMPACT_IDENTIFIER_EIP2930 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); (Self::Eip2930(tx), buf) } - 2 => { + COMPACT_IDENTIFIER_EIP1559 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Self::Eip1559(tx), buf) } - 3 => { + COMPACT_EXTENDED_IDENTIFIER_FLAG => { // An identifier of 3 indicates that the transaction type did not fit into // the backwards compatible 2 bit identifier, their transaction types are // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, // we need to read the concrete transaction type from the buffer by // reading the full 8 bits (single byte) and match on this transaction type.
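// An illustrative sketch of the identifier scheme described above (constant
// values as used in this diff; the helper itself is hypothetical): types 0-2
// fit the 2-bit field, anything else is flagged and spelled out as a full byte.
fn compact_identifier(tx_type_byte: u8) -> (usize, Option<u8>) {
    match tx_type_byte {
        0 | 1 | 2 => (tx_type_byte as usize, None), // legacy / eip2930 / eip1559
        other => (3, Some(other)), // COMPACT_EXTENDED_IDENTIFIER_FLAG + full type byte
    }
}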
- let identifier = buf.get_u8() as usize; + let identifier = buf.get_u8(); match identifier { - 3 => { + EIP4844_TX_TYPE_ID => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); (Self::Eip4844(tx), buf) } - 4 => { + EIP7702_TX_TYPE_ID => { let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); (Self::Eip7702(tx), buf) } #[cfg(feature = "optimism")] - 126 => { + DEPOSIT_TX_TYPE_ID => { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } @@ -790,7 +807,7 @@ impl Encodable for Transaction { } #[cfg(feature = "optimism")] Self::Deposit(deposit_tx) => { - deposit_tx.encode(out, true); + deposit_tx.encode_inner(out, true); } } } @@ -803,7 +820,7 @@ impl Encodable for Transaction { Self::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), Self::Eip7702(set_code_tx) => set_code_tx.payload_len_for_signature(), #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.payload_len(), + Self::Deposit(deposit_tx) => deposit_tx.encoded_len(true), } } } @@ -1174,19 +1191,27 @@ impl TransactionSigned { /// only `true`. pub(crate) fn payload_len_inner(&self) -> usize { match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_with_signature(&self.signature), - Transaction::Eip2930(access_list_tx) => { - access_list_tx.payload_len_with_signature(&self.signature) - } - Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.payload_len_with_signature(&self.signature) - } - Transaction::Eip4844(blob_tx) => blob_tx.payload_len_with_signature(&self.signature), - Transaction::Eip7702(set_code_tx) => { - set_code_tx.payload_len_with_signature(&self.signature) - } + Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( + &self.signature.as_signature_with_eip155_parity(legacy_tx.chain_id), + ), + Transaction::Eip2930(access_list_tx) => access_list_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + true, + ), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + true, + ), + Transaction::Eip4844(blob_tx) => blob_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + true, + ), + Transaction::Eip7702(set_code_tx) => set_code_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + true, + ), #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.payload_len(), + Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(true), } } @@ -1308,12 +1333,12 @@ impl TransactionSigned { }; let transaction = match tx_type { - TxType::Eip2930 => Transaction::Eip2930(TxEip2930::decode_inner(data)?), - TxType::Eip1559 => Transaction::Eip1559(TxEip1559::decode_inner(data)?), - TxType::Eip4844 => Transaction::Eip4844(TxEip4844::decode_inner(data)?), - TxType::Eip7702 => Transaction::Eip7702(TxEip7702::decode_inner(data)?), + TxType::Eip2930 => Transaction::Eip2930(TxEip2930::decode_fields(data)?), + TxType::Eip1559 => Transaction::Eip1559(TxEip1559::decode_fields(data)?), + TxType::Eip4844 => Transaction::Eip4844(TxEip4844::decode_fields(data)?), + TxType::Eip7702 => Transaction::Eip7702(TxEip7702::decode_fields(data)?), #[cfg(feature = "optimism")] - TxType::Deposit => Transaction::Deposit(TxDeposit::decode_inner(data)?), + TxType::Deposit => Transaction::Deposit(TxDeposit::decode_fields(data)?), TxType::Legacy => return Err(RlpError::Custom("unexpected legacy tx type")), }; @@ -1378,21 +1403,27 @@ impl TransactionSigned 
{ pub fn length_without_header(&self) -> usize { // method computes the payload len without a RLP header match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_with_signature(&self.signature), - Transaction::Eip2930(access_list_tx) => { - access_list_tx.payload_len_with_signature_without_header(&self.signature) - } - Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.payload_len_with_signature_without_header(&self.signature) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.payload_len_with_signature_without_header(&self.signature) - } - Transaction::Eip7702(set_code_tx) => { - set_code_tx.payload_len_with_signature_without_header(&self.signature) - } + Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( + &self.signature.as_signature_with_eip155_parity(legacy_tx.chain_id), + ), + Transaction::Eip2930(access_list_tx) => access_list_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + false, + ), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + false, + ), + Transaction::Eip4844(blob_tx) => blob_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + false, + ), + Transaction::Eip7702(set_code_tx) => set_code_tx.encoded_len_with_signature( + &self.signature.as_signature_with_boolean_parity(), + false, + ), #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.payload_len_without_header(), + Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(false), } } } @@ -1491,11 +1522,6 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { transaction.set_chain_id(chain_id % (u64::MAX / 2 - 36)); } - if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction { - tx_eip_4844.placeholder = - if tx_eip_4844.to == Address::default() { None } else { Some(()) }; - } - #[cfg(feature = "optimism")] // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces // ambiguity in roundtrip tests. 
Patch the mint value of deposit transaction here, so that @@ -1655,6 +1681,7 @@ mod tests { }; use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; + use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; use std::str::FromStr; @@ -1770,7 +1797,7 @@ mod tests { chain_id: Some(4), nonce: 1u64, gas_price: 1000000000, - gas_limit: 100000u64, + gas_limit: 100000, to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(693361000000000u64), input: Default::default(), @@ -1809,7 +1836,7 @@ mod tests { nonce: 26, max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, - gas_limit: 21000, + gas_limit: MIN_TRANSACTION_GAS as u128, to: Address::from_slice(&hex!("61815774383099e24810ab832a5b2a5425c154d5")[..]).into(), value: U256::from(3000000000000000000u64), input: Default::default(), @@ -1914,13 +1941,13 @@ mod tests { // some random transactions pulled from hive tests let data = hex!("b86f02f86c0705843b9aca008506fc23ac00830124f89400000000000000000000000000000000000003160180c001a00293c713e2f1eab91c366621ff2f867e05ad7e99d4aa5d069aafeb9e1e8c9b6aa05ec6c0605ff20b57c90a6484ec3b0509e5923733d06f9b69bee9a2dabe4f1352"); let tx = TransactionSigned::decode(&mut data.as_slice()).unwrap(); - let mut b = Vec::new(); + let mut b = Vec::with_capacity(data.len()); tx.encode(&mut b); assert_eq!(data.as_slice(), b.as_slice()); let data = hex!("f865048506fc23ac00830124f8940000000000000000000000000000000000000316018032a06b8fdfdcb84790816b7af85b19305f493665fe8b4e7c51ffdd7cc144cd776a60a028a09ab55def7b8d6602ba1c97a0ebbafe64ffc9c8e89520cec97a8edfb2ebe9"); let tx = TransactionSigned::decode(&mut data.as_slice()).unwrap(); - let mut b = Vec::new(); + let mut b = Vec::with_capacity(data.len()); tx.encode(&mut b); assert_eq!(data.as_slice(), b.as_slice()); } diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs deleted file mode 100644 index d7fc652f07..0000000000 --- a/crates/primitives/src/transaction/optimism.rs +++ /dev/null @@ -1,231 +0,0 @@ -use crate::{Address, Bytes, TxKind, TxType, B256, U256}; -use alloy_rlp::{ - length_of_length, Decodable, Encodable, Error as DecodeError, Header, EMPTY_STRING_CODE, -}; -use bytes::Buf; -use core::mem; -#[cfg(any(test, feature = "reth-codec"))] -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -/// Deposit transactions, also known as deposits are initiated on L1, and executed on L2. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TxDeposit { - /// Hash that uniquely identifies the source of the deposit. - pub source_hash: B256, - /// The address of the sender account. - pub from: Address, - /// The address of the recipient account, or the null (zero-length) address if the deposited - /// transaction is a contract creation. - pub to: TxKind, - /// The ETH value to mint on L2. - pub mint: Option, - /// The ETH value to send to the recipient account. - pub value: U256, - /// The gas limit for the L2 transaction. - pub gas_limit: u64, - /// Field indicating if this transaction is exempt from the L2 gas limit. 
- pub is_system_transaction: bool, - /// Input has two uses depending on whether the transaction is Create or Call (if `to` field is None or - /// Some). - pub input: Bytes, -} - -impl TxDeposit { - /// Calculates a heuristic for the in-memory size of the [`TxDeposit`] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::<B256>() + // source_hash - mem::size_of::<Address>() + // from - self.to.size() + // to - mem::size_of::<Option<u128>>() + // mint - mem::size_of::<U256>() + // value - mem::size_of::<u64>() + // gas_limit - mem::size_of::<bool>() + // is_system_transaction - self.input.len() // input - } - - /// Decodes the inner [`TxDeposit`] fields from RLP bytes. - /// - /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following - /// RLP fields in the following order: - /// - /// - `source_hash` - /// - `from` - /// - `to` - /// - `mint` - /// - `value` - /// - `gas_limit` - /// - `is_system_transaction` - /// - `input` - pub fn decode_inner(buf: &mut &[u8]) -> Result<Self, DecodeError> { - Ok(Self { - source_hash: Decodable::decode(buf)?, - from: Decodable::decode(buf)?, - to: Decodable::decode(buf)?, - mint: if *buf.first().ok_or(DecodeError::InputTooShort)? == EMPTY_STRING_CODE { - buf.advance(1); - None - } else { - Some(Decodable::decode(buf)?) - }, - value: Decodable::decode(buf)?, - gas_limit: Decodable::decode(buf)?, - is_system_transaction: Decodable::decode(buf)?, - input: Decodable::decode(buf)?, - }) - } - - /// Outputs the length of the transaction's fields, without a RLP header or length of the - /// eip155 fields. - pub(crate) fn fields_len(&self) -> usize { - self.source_hash.length() + - self.from.length() + - self.to.length() + - self.mint.map_or(1, |mint| mint.length()) + - self.value.length() + - self.gas_limit.length() + - self.is_system_transaction.length() + - self.input.0.length() - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - /// - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - self.source_hash.encode(out); - self.from.encode(out); - self.to.encode(out); - if let Some(mint) = self.mint { - mint.encode(out); - } else { - out.put_u8(EMPTY_STRING_CODE); - } - self.value.encode(out); - self.gas_limit.encode(out); - self.is_system_transaction.encode(out); - self.input.encode(out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub(crate) fn encode(&self, out: &mut dyn bytes::BufMut, with_header: bool) { - let payload_length = self.fields_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - } - - /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header.
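// A small sketch of the optional-`mint` convention used by the deposit
// encoding above (assumed standalone helper): `None` is written as the RLP
// empty string byte (0x80), which is also why decoding peeks at the first
// byte before choosing a branch.
fn mint_field_len(mint: Option<u128>) -> usize {
    use alloy_rlp::Encodable;
    // one byte for the empty string marker, otherwise the RLP length of the value
    mint.map_or(1, |m| m.length())
}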
- pub(crate) fn payload_len(&self) -> usize { - let payload_length = self.fields_len(); - // 'tx type' + 'header length' + 'payload length' - let len = 1 + length_of_length(payload_length) + payload_length; - length_of_length(len) + len - } - - pub(crate) fn payload_len_without_header(&self) -> usize { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } - - /// Get the transaction type - pub(crate) const fn tx_type(&self) -> TxType { - TxType::Deposit - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{revm_primitives::hex_literal::hex, TransactionSigned}; - use bytes::BytesMut; - - #[test] - fn test_rlp_roundtrip() { - let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - - let tx_a = TransactionSigned::decode_enveloped(&mut bytes.as_ref()).unwrap(); - let tx_b = TransactionSigned::decode(&mut &bytes[..]).unwrap(); - - let mut buf_a = BytesMut::default(); - tx_a.encode_enveloped(&mut buf_a); - assert_eq!(&buf_a[..], &bytes[..]); - - let mut buf_b = BytesMut::default(); - tx_b.encode_enveloped(&mut buf_b); - assert_eq!(&buf_b[..], &bytes[..]); - } - - #[test] - fn test_encode_decode_fields() { - let original = TxDeposit { - source_hash: B256::default(), - from: Address::default(), - to: TxKind::default(), - mint: Some(100), - value: U256::default(), - gas_limit: 50000, - is_system_transaction: true, - input: Bytes::default(), - }; - - let mut buffer = BytesMut::new(); - original.encode_fields(&mut buffer); - let decoded = TxDeposit::decode_inner(&mut &buffer[..]).expect("Failed to decode"); - - assert_eq!(original, decoded); - } - - #[test] - fn test_encode_with_and_without_header() { - let tx_deposit = TxDeposit { - source_hash: B256::default(), - from: Address::default(), - to: TxKind::default(), - mint: Some(100), - value: U256::default(), - gas_limit: 50000, - is_system_transaction: true, - input: Bytes::default(), - }; - - let mut buffer_with_header = BytesMut::new(); - tx_deposit.encode(&mut buffer_with_header, true); - - let mut buffer_without_header = BytesMut::new(); - tx_deposit.encode(&mut buffer_without_header, false); - - assert!(buffer_with_header.len() > buffer_without_header.len()); - } - - #[test] - fn test_payload_length() { - let tx_deposit = TxDeposit { - source_hash: B256::default(), - from: Address::default(), - to: TxKind::default(), - mint: Some(100), - value: U256::default(), - gas_limit: 50000, - is_system_transaction: true, - input: Bytes::default(), - }; - - let total_len = tx_deposit.payload_len(); - let len_without_header = tx_deposit.payload_len_without_header(); - - assert!(total_len > len_without_header); - } -} diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ff6284d375..a883e739a1 
100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -7,14 +7,13 @@ use crate::{ TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, B256, EIP4844_TX_TYPE_ID, }; +use alloc::vec::Vec; +use alloy_consensus::{SignableTransaction, TxEip4844WithSidecar}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE}; use bytes::Buf; use derive_more::{AsRef, Deref}; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a /// non-4844 signed transaction. #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] @@ -101,7 +100,11 @@ impl PooledTransactionsElement { // If the transaction is an EIP-4844 transaction... TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. - Self::BlobTransaction(BlobTransaction { transaction: tx, signature, hash, sidecar }) + Self::BlobTransaction(BlobTransaction { + signature, + hash, + transaction: TxEip4844WithSidecar { tx, sidecar }, + }) } // If the transaction is not EIP-4844, return an error with the original // transaction. @@ -150,7 +153,7 @@ impl PooledTransactionsElement { Self::Eip2930 { transaction, .. } => transaction.nonce, Self::Eip1559 { transaction, .. } => transaction.nonce, Self::Eip7702 { transaction, .. } => transaction.nonce, - Self::BlobTransaction(blob_tx) => blob_tx.transaction.nonce, + Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, } } @@ -301,19 +304,30 @@ impl PooledTransactionsElement { match self { Self::Legacy { transaction, signature, .. } => { // method computes the payload len with a RLP header - transaction.payload_len_with_signature(signature) + transaction.encoded_len_with_signature( + &signature.as_signature_with_eip155_parity(transaction.chain_id), + ) } Self::Eip2930 { transaction, signature, .. } => { // method computes the payload len without a RLP header - transaction.payload_len_with_signature_without_header(signature) + transaction.encoded_len_with_signature( + &signature.as_signature_with_boolean_parity(), + false, + ) } Self::Eip1559 { transaction, signature, .. } => { // method computes the payload len without a RLP header - transaction.payload_len_with_signature_without_header(signature) + transaction.encoded_len_with_signature( + &signature.as_signature_with_boolean_parity(), + false, + ) } Self::Eip7702 { transaction, signature, .. } => { // method computes the payload len without a RLP header - transaction.payload_len_with_signature_without_header(signature) + transaction.encoded_len_with_signature( + &signature.as_signature_with_boolean_parity(), + false, + ) } Self::BlobTransaction(blob_tx) => { // the encoding does not use a header, so we set `with_header` to false @@ -347,18 +361,26 @@ impl PooledTransactionsElement { // - EIP-4844: BlobTransaction::encode_with_type_inner // - EIP-7702: TxEip7702::encode_with_signature match self { - Self::Legacy { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip7702 { transaction, signature, .. 
} => { - transaction.encode_with_signature(signature, out, false) - } + Self::Legacy { transaction, signature, .. } => transaction + .encode_with_signature_fields( + &signature.as_signature_with_eip155_parity(transaction.chain_id), + out, + ), + Self::Eip2930 { transaction, signature, .. } => transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + false, + ), + Self::Eip1559 { transaction, signature, .. } => transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + false, + ), + Self::Eip7702 { transaction, signature, .. } => transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + false, + ), Self::BlobTransaction(blob_tx) => { // The inner encoding is used with `with_header` set to true, making the final // encoding: @@ -401,7 +423,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Self::BlobTransaction(tx) => Some(&tx.transaction), + Self::BlobTransaction(tx) => Some(&tx.transaction.tx), _ => None, } } @@ -430,7 +452,7 @@ impl PooledTransactionsElement { /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). pub const fn max_fee_per_blob_gas(&self) -> Option { match self { - Self::BlobTransaction(tx) => Some(tx.transaction.max_fee_per_blob_gas), + Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_fee_per_blob_gas), _ => None, } } @@ -444,7 +466,7 @@ impl PooledTransactionsElement { Self::Legacy { .. } | Self::Eip2930 { .. } => None, Self::Eip1559 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), Self::Eip7702 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.transaction.max_priority_fee_per_gas), + Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), } } @@ -457,7 +479,7 @@ impl PooledTransactionsElement { Self::Eip2930 { transaction, .. } => transaction.gas_price, Self::Eip1559 { transaction, .. } => transaction.max_fee_per_gas, Self::Eip7702 { transaction, .. } => transaction.max_fee_per_gas, - Self::BlobTransaction(tx) => tx.transaction.max_fee_per_gas, + Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, } } } @@ -478,20 +500,34 @@ impl Encodable for PooledTransactionsElement { // - EIP-4844: BlobTransaction::encode_with_type_inner // - EIP-7702: TxEip7702::encode_with_signature match self { - Self::Legacy { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out) - } + Self::Legacy { transaction, signature, .. } => transaction + .encode_with_signature_fields( + &signature.as_signature_with_eip155_parity(transaction.chain_id), + out, + ), Self::Eip2930 { transaction, signature, .. } => { // encodes with string header - transaction.encode_with_signature(signature, out, true) + transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + true, + ) } Self::Eip1559 { transaction, signature, .. } => { // encodes with string header - transaction.encode_with_signature(signature, out, true) + transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + true, + ) } Self::Eip7702 { transaction, signature, .. 
} => { // encodes with string header - transaction.encode_with_signature(signature, out, true) + transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + out, + true, + ) } Self::BlobTransaction(blob_tx) => { // The inner encoding is used with `with_header` set to true, making the final @@ -506,19 +542,24 @@ impl Encodable for PooledTransactionsElement { match self { Self::Legacy { transaction, signature, .. } => { // method computes the payload len with a RLP header - transaction.payload_len_with_signature(signature) + transaction.encoded_len_with_signature( + &signature.as_signature_with_eip155_parity(transaction.chain_id), + ) } Self::Eip2930 { transaction, signature, .. } => { // method computes the payload len with a RLP header - transaction.payload_len_with_signature(signature) + transaction + .encoded_len_with_signature(&signature.as_signature_with_boolean_parity(), true) } Self::Eip1559 { transaction, signature, .. } => { // method computes the payload len with a RLP header - transaction.payload_len_with_signature(signature) + transaction + .encoded_len_with_signature(&signature.as_signature_with_boolean_parity(), true) } Self::Eip7702 { transaction, signature, .. } => { // method computes the payload len with a RLP header - transaction.payload_len_with_signature(signature) + transaction + .encoded_len_with_signature(&signature.as_signature_with_boolean_parity(), true) } Self::BlobTransaction(blob_tx) => { // the encoding uses a header, so we set `with_header` to true @@ -662,7 +703,7 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { match Self::try_from(tx_signed) { Ok(Self::BlobTransaction(mut tx)) => { // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?; + tx.transaction.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?; Ok(Self::BlobTransaction(tx)) } Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 2c6f4598a5..340d7662d8 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -3,7 +3,8 @@ use crate::{ keccak256, Signature, Transaction, TransactionSigned, TxEip4844, TxHash, EIP4844_TX_TYPE_ID, }; -use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; +use alloy_consensus::TxEip4844WithSidecar; +use alloy_rlp::{Decodable, Error as RlpError, Header}; use serde::{Deserialize, Serialize}; #[doc(inline)] @@ -12,7 +13,6 @@ pub use alloy_eips::eip4844::BlobTransactionSidecar; #[cfg(feature = "c-kzg")] pub use alloy_eips::eip4844::BlobTransactionValidationError; -#[cfg(not(feature = "std"))] use alloc::vec::Vec; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their @@ -24,12 +24,10 @@ use alloc::vec::Vec; pub struct BlobTransaction { /// The transaction hash. pub hash: TxHash, - /// The transaction payload. - pub transaction: TxEip4844, /// The transaction signature. pub signature: Signature, - /// The transaction's blob sidecar. - pub sidecar: BlobTransactionSidecar, + /// The transaction payload with the sidecar. 
+ pub transaction: TxEip4844WithSidecar, } impl BlobTransaction { @@ -43,7 +41,11 @@ impl BlobTransaction { ) -> Result<Self, (TransactionSigned, BlobTransactionSidecar)> { let TransactionSigned { transaction, signature, hash } = tx; match transaction { - Transaction::Eip4844(transaction) => Ok(Self { hash, transaction, signature, sidecar }), + Transaction::Eip4844(transaction) => Ok(Self { + hash, + transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, + signature, + }), transaction => { let tx = TransactionSigned { transaction, signature, hash }; Err((tx, sidecar)) @@ -59,19 +61,19 @@ impl BlobTransaction { &self, proof_settings: &c_kzg::KzgSettings, ) -> Result<(), BlobTransactionValidationError> { - self.transaction.validate_blob(&self.sidecar, proof_settings) + self.transaction.validate_blob(proof_settings) } /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { let transaction = TransactionSigned { - transaction: Transaction::Eip4844(self.transaction), + transaction: Transaction::Eip4844(self.transaction.tx), hash: self.hash, signature: self.signature, }; - (transaction, self.sidecar) + (transaction, self.transaction.sidecar) } /// Encodes the [`BlobTransaction`] fields as RLP, with a tx type. If `with_header` is `false`, @@ -111,36 +113,8 @@ impl BlobTransaction { /// Note: this should be used only when implementing other RLP encoding methods, and does not /// represent the full RLP encoding of the blob transaction. pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - // First we construct both required list headers. - // - // The `transaction_payload_body` length is the length of the fields, plus the length of - // its list header. - let tx_header = Header { - list: true, - payload_length: self.transaction.fields_len() + self.signature.payload_len(), - }; - - let tx_length = tx_header.length() + tx_header.payload_length; - - // The payload length is the length of the `transaction_payload_body` list, plus the - // length of the blobs, commitments, and proofs. - let payload_length = tx_length + self.sidecar.fields_len(); - - // First we use the payload len to construct the first list header - let blob_tx_header = Header { list: true, payload_length }; - - // Encode the blob tx header first - blob_tx_header.encode(out); - - // Encode the inner tx list header, then its fields - tx_header.encode(out); - self.transaction.encode_fields(out); - - // Encode the signature - self.signature.encode(out); - - // Encode the blobs, commitments, and proofs - self.sidecar.encode(out); + self.transaction + .encode_with_signature_fields(&self.signature.as_signature_with_boolean_parity(), out); } /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, @@ -181,14 +155,14 @@ impl BlobTransaction { // its list header. let tx_header = Header { list: true, - payload_length: self.transaction.fields_len() + self.signature.payload_len(), + payload_length: self.transaction.tx.fields_len() + self.signature.payload_len(), }; let tx_length = tx_header.length() + tx_header.payload_length; // The payload length is the length of the `transaction_payload_body` list, plus the // length of the blobs, commitments, and proofs.
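// Length-accounting sketch for the nested layout described above (names are
// illustrative): the outer list wraps `[tx_payload_body, blobs, commitments,
// proofs]`, where `tx_payload_body` is itself an RLP list of the EIP-4844
// fields followed by the signature.
fn outer_payload_len(tx_fields_len: usize, signature_len: usize, sidecar_fields_len: usize) -> usize {
    let body = tx_fields_len + signature_len;
    // inner list header + inner payload, then the sidecar fields alongside it
    alloy_rlp::length_of_length(body) + body + sidecar_fields_len
}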
- let payload_length = tx_length + self.sidecar.fields_len(); + let payload_length = tx_length + self.transaction.sidecar.fields_len(); // We use the calculated payload len to construct the first list header, which encompasses // everything in the tx - the length of the second, inner list header is part of @@ -234,7 +208,7 @@ impl BlobTransaction { let inner_remaining_len = data.len(); // inner transaction - let transaction = TxEip4844::decode_inner(data)?; + let transaction = TxEip4844::decode(data)?; // signature let signature = Signature::decode(data)?; @@ -265,7 +239,11 @@ impl BlobTransaction { // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a // signature for hashing without a header. We then hash the result. let mut buf = Vec::new(); - transaction.encode_with_signature(&signature, &mut buf, false); + transaction.encode_with_signature( + &signature.as_signature_with_boolean_parity(), + &mut buf, + false, + ); let hash = keccak256(&buf); // the outer header is for the entire transaction, so we check the length here @@ -274,7 +252,7 @@ impl BlobTransaction { return Err(RlpError::UnexpectedLength) } - Ok(Self { transaction, hash, signature, sidecar }) + Ok(Self { transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, hash, signature }) } } @@ -311,6 +289,7 @@ mod tests { use super::*; use crate::{hex, kzg::Blob}; use alloy_eips::eip4844::Bytes48; + use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; #[test] diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 69fbd6ab1c..fe03d8d712 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,5 +1,8 @@ +use core::fmt::Debug; + use crate::{transaction::util::secp256k1, Address, B256, U256}; -use alloy_primitives::Bytes; +use alloy_consensus::EncodableSignature; +use alloy_primitives::{Bytes, Parity}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use serde::{Deserialize, Serialize}; @@ -18,6 +21,9 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ /// r, s: Values corresponding to the signature of the /// transaction and used to determine the sender of /// the transaction; formally Tr and Ts. This is expanded in Appendix F of yellow paper. +/// +/// This type is unaware of the chain id, and thus shouldn't be used when encoding or decoding +/// legacy transactions. Use `SignatureWithParity` instead. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] @@ -33,15 +39,6 @@ pub struct Signature { pub odd_y_parity: bool, } -impl Signature { - /// Returns the signature for the optimism deposit transactions, which don't include a - /// signature. - #[cfg(feature = "optimism")] - pub const fn optimism_deposit_tx_signature() -> Self { - Self { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } - } -} - #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Signature { fn to_compact(&self, buf: &mut B) -> usize @@ -64,44 +61,6 @@ impl reth_codecs::Compact for Signature { } impl Signature { - /// Output the length of the signature without the length of the RLP header, using the legacy - /// scheme with EIP-155 support depends on `chain_id`. 
- pub(crate) fn payload_len_with_eip155_chain_id(&self, chain_id: Option) -> usize { - self.v(chain_id).length() + self.r.length() + self.s.length() - } - - /// Encode the `v`, `r`, `s` values without a RLP header. - /// Encodes the `v` value using the legacy scheme with EIP-155 support depends on `chain_id`. - pub(crate) fn encode_with_eip155_chain_id( - &self, - out: &mut dyn alloy_rlp::BufMut, - chain_id: Option, - ) { - self.v(chain_id).encode(out); - self.r.encode(out); - self.s.encode(out); - } - - /// Output the `v` of the signature depends on `chain_id` - #[inline] - #[allow(clippy::missing_const_for_fn)] - pub fn v(&self, chain_id: Option) -> u64 { - if let Some(chain_id) = chain_id { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - self.odd_y_parity as u64 + chain_id * 2 + 35 - } else { - #[cfg(feature = "optimism")] - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *self == Self::optimism_deposit_tx_signature() { - return 0 - } - self.odd_y_parity as u64 + 27 - } - } - /// Decodes the `v`, `r`, `s` values without a RLP header. /// This will return a chain ID if the `v` value is [EIP-155](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md) compatible. pub(crate) fn decode_with_eip155_chain_id( @@ -202,6 +161,45 @@ impl Signature { pub const fn size(&self) -> usize { core::mem::size_of::() } + + /// Returns [Parity] value based on `chain_id` for legacy transaction signature. + #[allow(clippy::missing_const_for_fn)] + pub fn legacy_parity(&self, chain_id: Option) -> Parity { + if let Some(chain_id) = chain_id { + Parity::Parity(self.odd_y_parity).with_chain_id(chain_id) + } else { + #[cfg(feature = "optimism")] + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock + if *self == Self::optimism_deposit_tx_signature() { + return Parity::Parity(false) + } + Parity::NonEip155(self.odd_y_parity) + } + } + + /// Returns a signature with the given chain ID applied to the `v` value. + pub(crate) fn as_signature_with_eip155_parity( + &self, + chain_id: Option, + ) -> SignatureWithParity { + SignatureWithParity::new(self.r, self.s, self.legacy_parity(chain_id)) + } + + /// Returns a signature with a boolean parity flag. This is useful when we want to encode + /// the `v` value as 0 or 1. + pub(crate) const fn as_signature_with_boolean_parity(&self) -> SignatureWithParity { + SignatureWithParity::new(self.r, self.s, Parity::Parity(self.odd_y_parity)) + } + + /// Returns the signature for the optimism deposit transactions, which don't include a + /// signature. + #[cfg(feature = "optimism")] + pub const fn optimism_deposit_tx_signature() -> Self { + Self { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } + } } impl From for Signature { @@ -228,52 +226,69 @@ pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> } } -#[cfg(test)] -mod tests { - use crate::{transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256}; - use alloy_primitives::{hex, hex::FromHex, Bytes}; - use std::str::FromStr; +/// A signature with full parity included. +// TODO: replace by alloy Signature when there will be an easy way to instantiate them. +pub(crate) struct SignatureWithParity { + /// The R field of the signature; the point on the curve. 
@@ -228,52 +226,69 @@ pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option<u64>)>
     }
 }
 
-#[cfg(test)]
-mod tests {
-    use crate::{transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256};
-    use alloy_primitives::{hex, hex::FromHex, Bytes};
-    use std::str::FromStr;
+/// A signature with full parity included.
+// TODO: replace by alloy Signature when there will be an easy way to instantiate them.
+pub(crate) struct SignatureWithParity {
+    /// The R field of the signature; the point on the curve.
+    r: U256,
+    /// The S field of the signature; the point on the curve.
+    s: U256,
+    /// Signature parity
+    parity: Parity,
+}
 
-    #[test]
-    fn test_payload_len_with_eip155_chain_id() {
-        // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0).
-        let signature = Signature { r: U256::from(1), s: U256::from(1), odd_y_parity: false };
+impl SignatureWithParity {
+    /// Creates a new [`SignatureWithParity`].
+    pub(crate) const fn new(r: U256, s: U256, parity: Parity) -> Self {
+        Self { r, s, parity }
+    }
+}
 
-        assert_eq!(3, signature.payload_len_with_eip155_chain_id(None));
-        assert_eq!(3, signature.payload_len_with_eip155_chain_id(Some(1)));
-        assert_eq!(4, signature.payload_len_with_eip155_chain_id(Some(47)));
+impl EncodableSignature for SignatureWithParity {
+    fn from_rs_and_parity<
+        P: TryInto<Parity, Error = E>,
+        E: Into<alloy_primitives::SignatureError>,
+    >(
+        r: U256,
+        s: U256,
+        parity: P,
+    ) -> Result<Self, alloy_primitives::SignatureError> {
+        Ok(Self { r, s, parity: parity.try_into().map_err(Into::into)? })
     }
 
-    #[test]
-    fn test_v() {
-        // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0).
-        let signature = Signature { r: U256::from(1), s: U256::from(1), odd_y_parity: false };
-        assert_eq!(27, signature.v(None));
-        assert_eq!(37, signature.v(Some(1)));
+    fn r(&self) -> U256 {
+        self.r
+    }
 
-        let signature = Signature { r: U256::from(1), s: U256::from(1), odd_y_parity: true };
-        assert_eq!(28, signature.v(None));
-        assert_eq!(38, signature.v(Some(1)));
+    fn s(&self) -> U256 {
+        self.s
+    }
+
+    fn v(&self) -> Parity {
+        self.parity
+    }
+
+    fn with_parity<T: Into<Parity>>(self, parity: T) -> Self {
+        Self { r: self.r, s: self.s, parity: parity.into() }
     }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{hex, transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256};
+    use alloy_primitives::{hex::FromHex, Bytes, Parity};
+    use std::str::FromStr;
 
     #[test]
-    fn test_encode_and_decode_with_eip155_chain_id() {
+    fn test_legacy_parity() {
         // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0).
let signature = Signature { r: U256::from(1), s: U256::from(1), odd_y_parity: false }; + assert_eq!(Parity::NonEip155(false), signature.legacy_parity(None)); + assert_eq!(Parity::Eip155(37), signature.legacy_parity(Some(1))); - let mut encoded = Vec::new(); - signature.encode_with_eip155_chain_id(&mut encoded, None); - assert_eq!(encoded.len(), signature.payload_len_with_eip155_chain_id(None)); - let (decoded, chain_id) = Signature::decode_with_eip155_chain_id(&mut &*encoded).unwrap(); - assert_eq!(signature, decoded); - assert_eq!(None, chain_id); - - let mut encoded = Vec::new(); - signature.encode_with_eip155_chain_id(&mut encoded, Some(1)); - assert_eq!(encoded.len(), signature.payload_len_with_eip155_chain_id(Some(1))); - let (decoded, chain_id) = Signature::decode_with_eip155_chain_id(&mut &*encoded).unwrap(); - assert_eq!(signature, decoded); - assert_eq!(Some(1), chain_id); + let signature = Signature { r: U256::from(1), s: U256::from(1), odd_y_parity: true }; + assert_eq!(Parity::NonEip155(true), signature.legacy_parity(None)); + assert_eq!(Parity::Eip155(38), signature.legacy_parity(Some(1))); } #[test] diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index eef61da840..c4ddbb41ca 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -5,11 +5,23 @@ use serde::{Deserialize, Serialize}; #[cfg(test)] use reth_codecs::Compact; +/// Identifier parameter for legacy transaction +#[cfg(any(test, feature = "reth-codec"))] +pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; + +/// Identifier parameter for EIP-2930 transaction +#[cfg(any(test, feature = "reth-codec"))] +pub(crate) const COMPACT_IDENTIFIER_EIP2930: usize = 1; + +/// Identifier parameter for EIP-1559 transaction +#[cfg(any(test, feature = "reth-codec"))] +pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; + /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier -/// parameter. In the case of a 3, the full transaction type is read from the buffer as a -/// single byte. +/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is +/// read from the buffer as a single byte. #[cfg(any(test, feature = "reth-codec"))] -const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; +pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Identifier for legacy transaction, however [`TxLegacy`](crate::TxLegacy) this is technically not /// typed. @@ -144,35 +156,35 @@ impl reth_codecs::Compact for TxType { B: bytes::BufMut + AsMut<[u8]>, { match self { - Self::Legacy => 0, - Self::Eip2930 => 1, - Self::Eip1559 => 2, + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, Self::Eip4844 => { - buf.put_u8(*self as u8); + buf.put_u8(EIP4844_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } Self::Eip7702 => { - buf.put_u8(*self as u8); + buf.put_u8(EIP7702_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } #[cfg(feature = "optimism")] Self::Deposit => { - buf.put_u8(*self as u8); + buf.put_u8(DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } } // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier - // parameter. In the case of a 3, the full transaction type is read from the buffer as a - // single byte. + // parameter. 
In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { use bytes::Buf; ( match identifier { - 0 => Self::Legacy, - 1 => Self::Eip2930, - 2 => Self::Eip1559, + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { @@ -243,34 +255,34 @@ mod tests { #[test] fn test_u64_to_tx_type() { // Test for Legacy transaction - assert_eq!(TxType::try_from(U64::from(0)).unwrap(), TxType::Legacy); + assert_eq!(TxType::try_from(U64::from(LEGACY_TX_TYPE_ID)).unwrap(), TxType::Legacy); // Test for EIP2930 transaction - assert_eq!(TxType::try_from(U64::from(1)).unwrap(), TxType::Eip2930); + assert_eq!(TxType::try_from(U64::from(EIP2930_TX_TYPE_ID)).unwrap(), TxType::Eip2930); // Test for EIP1559 transaction - assert_eq!(TxType::try_from(U64::from(2)).unwrap(), TxType::Eip1559); + assert_eq!(TxType::try_from(U64::from(EIP1559_TX_TYPE_ID)).unwrap(), TxType::Eip1559); // Test for EIP4844 transaction - assert_eq!(TxType::try_from(U64::from(3)).unwrap(), TxType::Eip4844); + assert_eq!(TxType::try_from(U64::from(EIP4844_TX_TYPE_ID)).unwrap(), TxType::Eip4844); // Test for EIP7702 transaction - assert_eq!(TxType::try_from(U64::from(4)).unwrap(), TxType::Eip7702); + assert_eq!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID)).unwrap(), TxType::Eip7702); // Test for Deposit transaction #[cfg(feature = "optimism")] - assert_eq!(TxType::try_from(U64::from(126)).unwrap(), TxType::Deposit); + assert_eq!(TxType::try_from(U64::from(DEPOSIT_TX_TYPE_ID)).unwrap(), TxType::Deposit); // For transactions with unsupported values - assert!(TxType::try_from(U64::from(5)).is_err()); + assert!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID + 1)).is_err()); } #[test] fn test_txtype_to_compat() { let cases = vec![ - (TxType::Legacy, 0, vec![]), - (TxType::Eip2930, 1, vec![]), - (TxType::Eip1559, 2, vec![]), + (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), + (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), + (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), #[cfg(feature = "optimism")] @@ -291,9 +303,9 @@ mod tests { #[test] fn test_txtype_from_compact() { let cases = vec![ - (TxType::Legacy, 0, vec![]), - (TxType::Eip2930, 1, vec![]), - (TxType::Eip1559, 2, vec![]), + (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), + (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), + (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), #[cfg(feature = "optimism")] @@ -317,29 +329,29 @@ mod tests { assert_eq!(tx_type, TxType::Legacy); // Test for EIP2930 transaction - let tx_type = TxType::decode(&mut &[1u8][..]).unwrap(); + let tx_type = TxType::decode(&mut &[EIP2930_TX_TYPE_ID][..]).unwrap(); assert_eq!(tx_type, TxType::Eip2930); // Test for EIP1559 transaction - let tx_type = TxType::decode(&mut &[2u8][..]).unwrap(); + let tx_type = TxType::decode(&mut &[EIP1559_TX_TYPE_ID][..]).unwrap(); assert_eq!(tx_type, TxType::Eip1559); // Test for EIP4844 transaction - let tx_type = 
TxType::decode(&mut &[3u8][..]).unwrap(); + let tx_type = TxType::decode(&mut &[EIP4844_TX_TYPE_ID][..]).unwrap(); assert_eq!(tx_type, TxType::Eip4844); // Test for EIP7702 transaction - let tx_type = TxType::decode(&mut &[4u8][..]).unwrap(); + let tx_type = TxType::decode(&mut &[EIP7702_TX_TYPE_ID][..]).unwrap(); assert_eq!(tx_type, TxType::Eip7702); // Test random byte not in range - let buf = [rand::thread_rng().gen_range(5..=u8::MAX)]; + let buf = [rand::thread_rng().gen_range(EIP7702_TX_TYPE_ID + 1..=u8::MAX)]; assert!(TxType::decode(&mut &buf[..]).is_err()); // Test for Deposit transaction #[cfg(feature = "optimism")] { - let buf = [126u8]; + let buf = [DEPOSIT_TX_TYPE_ID]; let tx_type = TxType::decode(&mut &buf[..]).unwrap(); assert_eq!(tx_type, TxType::Deposit); } diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index 5bff5215d7..3e96b6dda8 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -11,7 +11,7 @@ use core::ops::Deref; /// /// All variants are based on a the raw [Transaction] data and can contain additional information /// extracted (expensive) from that transaction, like the hash and the signer. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] pub enum TransactionSignedVariant { /// A signed transaction without a hash. SignedNoHash(TransactionSignedNoHash), @@ -130,24 +130,6 @@ impl TransactionSignedVariant { } } -impl From for TransactionSignedVariant { - fn from(tx: TransactionSignedNoHash) -> Self { - Self::SignedNoHash(tx) - } -} - -impl From for TransactionSignedVariant { - fn from(tx: TransactionSigned) -> Self { - Self::Signed(tx) - } -} - -impl From for TransactionSignedVariant { - fn from(tx: TransactionSignedEcRecovered) -> Self { - Self::SignedEcRecovered(tx) - } -} - impl AsRef for TransactionSignedVariant { fn as_ref(&self) -> &Transaction { self.as_raw() diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index bd8c95c225..e8993170a9 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,9 +1,12 @@ use crate::{segments::SegmentSet, Pruner}; use reth_chainspec::MAINNET; use reth_config::PruneConfig; -use reth_db_api::database::Database; +use reth_db::transaction::DbTxMut; use reth_exex_types::FinishedExExHeight; -use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, + PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, +}; use reth_prune_types::PruneModes; use std::time::Duration; use tokio::sync::watch; @@ -80,16 +83,15 @@ impl PrunerBuilder { } /// Builds a [Pruner] from the current configuration with the given provider factory. 
-    pub fn build_with_provider_factory<DB: Database>(
-        self,
-        provider_factory: ProviderFactory<DB>,
-    ) -> Pruner<DB, ProviderFactory<DB>> {
-        let segments = SegmentSet::<DB>::from_components(
-            provider_factory.static_file_provider(),
-            self.segments,
-        );
-
-        Pruner::<_, ProviderFactory<DB>>::new(
+    pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF>
+    where
+        PF: DatabaseProviderFactory<ProviderRW: DBProvider<Tx: DbTxMut> + BlockReader + PruneCheckpointWriter + TransactionsProvider>
+            + StaticFileProviderFactory,
+    {
+        let segments =
+            SegmentSet::from_components(provider_factory.static_file_provider(), self.segments);
+        let static_file_path = Some(provider_factory.static_file_provider().path().to_path_buf());
+        Pruner::new_with_factory(
             provider_factory,
             segments.into_vec(),
             self.block_interval,
@@ -97,20 +99,27 @@
             self.timeout,
             self.finished_exex_height,
             self.recent_sidecars_kept_blocks,
+            static_file_path,
         )
     }
 
     /// Builds a [Pruner] from the current configuration with the given static file provider.
-    pub fn build<DB: Database>(self, static_file_provider: StaticFileProvider) -> Pruner<DB, ()> {
-        let segments = SegmentSet::<DB>::from_components(static_file_provider, self.segments);
+    pub fn build<Provider>(self, static_file_provider: StaticFileProvider) -> Pruner<Provider, ()>
+    where
+        Provider:
+            DBProvider<Tx: DbTxMut> + BlockReader + PruneCheckpointWriter + TransactionsProvider,
+    {
+        let segments =
+            SegmentSet::<Provider>::from_components(static_file_provider.clone(), self.segments);
 
-        Pruner::<_, ()>::new(
+        Pruner::new(
             segments.into_vec(),
             self.block_interval,
             self.delete_limit,
             self.timeout,
             self.finished_exex_height,
             self.recent_sidecars_kept_blocks,
+            Some(static_file_provider.path().to_path_buf()),
         )
     }
 }
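For orientation, the reworked constructor is exercised the same way the updated unit test in `pruner.rs` does further down. A hedged sketch (the test-utils factory and the watch channel come from that test; everything else is illustrative):

```rust
// Sketch mirroring the updated `is_pruning_needed` test: constructing a pruner
// from any `DatabaseProviderFactory` via the renamed `new_with_factory`.
use reth_exex_types::FinishedExExHeight;
use reth_provider::test_utils::create_test_provider_factory;
use reth_prune::Pruner;

fn main() {
    let provider_factory = create_test_provider_factory();
    let (_finished_exex_height_tx, finished_exex_height_rx) =
        tokio::sync::watch::channel(FinishedExExHeight::NoExExs);

    // Args: factory, segments, min block interval, delete limit, timeout,
    // finished ExEx height receiver, recent sidecars kept blocks, static file path.
    let mut pruner = Pruner::new_with_factory(
        provider_factory,
        vec![],
        5,
        0,
        None,
        finished_exex_height_rx,
        0,
        None,
    );

    // With no segments configured and a fresh test database this run is
    // effectively a no-op; it is shown only to exercise the API surface.
    let _ = pruner.run(100);
}
```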
diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs
new file mode 100644
index 0000000000..a14127af20
--- /dev/null
+++ b/crates/prune/prune/src/db_ext.rs
@@ -0,0 +1,127 @@
+use std::{fmt::Debug, ops::RangeBounds};
+
+use reth_db::{
+    cursor::{DbCursorRO, DbCursorRW, RangeWalker},
+    table::{Table, TableRow},
+    transaction::DbTxMut,
+    DatabaseError,
+};
+use reth_prune_types::PruneLimiter;
+use tracing::debug;
+
+pub(crate) trait DbTxPruneExt: DbTxMut {
+    /// Prune the table for the specified pre-sorted key iterator.
+    ///
+    /// Returns number of rows pruned.
+    fn prune_table_with_iterator<T: Table>(
+        &self,
+        keys: impl IntoIterator<Item = T::Key>,
+        limiter: &mut PruneLimiter,
+        mut delete_callback: impl FnMut(TableRow<T>),
+    ) -> Result<(usize, bool), DatabaseError> {
+        let mut cursor = self.cursor_write::<T>()?;
+        let mut keys = keys.into_iter();
+
+        let mut deleted_entries = 0;
+
+        for key in &mut keys {
+            if limiter.is_limit_reached() {
+                debug!(
+                    target: "providers::db",
+                    ?limiter,
+                    deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(),
+                    time_limit = %limiter.is_time_limit_reached(),
+                    table = %T::NAME,
+                    "Pruning limit reached"
+                );
+                break
+            }
+
+            let row = cursor.seek_exact(key)?;
+            if let Some(row) = row {
+                cursor.delete_current()?;
+                limiter.increment_deleted_entries_count();
+                deleted_entries += 1;
+                delete_callback(row);
+            }
+        }
+
+        let done = keys.next().is_none();
+        Ok((deleted_entries, done))
+    }
+
+    /// Prune the table for the specified key range.
+    ///
+    /// Returns number of rows pruned.
+    fn prune_table_with_range<T: Table>(
+        &self,
+        keys: impl RangeBounds<T::Key> + Clone + Debug,
+        limiter: &mut PruneLimiter,
+        mut skip_filter: impl FnMut(&TableRow<T>) -> bool,
+        mut delete_callback: impl FnMut(TableRow<T>),
+    ) -> Result<(usize, bool), DatabaseError> {
+        let mut cursor = self.cursor_write::<T>()?;
+        let mut walker = cursor.walk_range(keys)?;
+
+        let mut deleted_entries = 0;
+
+        let done = loop {
+            // check for time out must be done in this scope since it's not done in
+            // `prune_table_with_range_step`
+            if limiter.is_limit_reached() {
+                debug!(
+                    target: "providers::db",
+                    ?limiter,
+                    deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(),
+                    time_limit = %limiter.is_time_limit_reached(),
+                    table = %T::NAME,
+                    "Pruning limit reached"
+                );
+                break false
+            }
+
+            let done = self.prune_table_with_range_step(
+                &mut walker,
+                limiter,
+                &mut skip_filter,
+                &mut delete_callback,
+            )?;
+
+            if done {
+                break true
+            }
+            deleted_entries += 1;
+        };
+
+        Ok((deleted_entries, done))
+    }
+
+    /// Steps once with the given walker and prunes the entry in the table.
+    ///
+    /// Returns `true` if the walker is finished, `false` if it may have more data to prune.
+    ///
+    /// CAUTION: Pruner limits are not checked. This allows for a clean exit of a prune run that's
+    /// pruning different tables concurrently, by letting them step to the same height before
+    /// timing out.
+    fn prune_table_with_range_step<T: Table>(
+        &self,
+        walker: &mut RangeWalker<'_, T, Self::CursorMut<T>>,
+        limiter: &mut PruneLimiter,
+        skip_filter: &mut impl FnMut(&TableRow<T>) -> bool,
+        delete_callback: &mut impl FnMut(TableRow<T>),
+    ) -> Result<bool, DatabaseError> {
+        let Some(res) = walker.next() else { return Ok(true) };
+
+        let row = res?;
+
+        if !skip_filter(&row) {
+            walker.delete_current()?;
+            limiter.increment_deleted_entries_count();
+            delete_callback(row);
+        }
+
+        Ok(false)
+    }
+}
+
+impl<Tx> DbTxPruneExt for Tx where Tx: DbTxMut {}
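Inside the crate, segments consume this extension trait through `tx_ref()` on a `DBProvider`, as the segment hunks further down show. A hedged sketch of the calling pattern (the limiter setup is an assumption modeled on reth's prune tests, and the range is illustrative):

```rust
// Crate-internal sketch: prune a fixed receipt range via the extension trait.
use crate::db_ext::DbTxPruneExt;
use reth_db::{tables, transaction::DbTxMut, DatabaseError};
use reth_provider::DBProvider;
use reth_prune_types::PruneLimiter;

fn prune_receipt_range<Provider>(provider: &Provider) -> Result<(usize, bool), DatabaseError>
where
    Provider: DBProvider<Tx: DbTxMut>,
{
    // Assumed builder-style limiter API, as used in reth's prune tests.
    let mut limiter = PruneLimiter::default().set_deleted_entries_limit(100);
    let mut last_pruned_tx = 0u64;

    // Delete receipts for tx numbers 0..=99; the `|_| false` skip filter keeps
    // nothing, and the callback records the last pruned key.
    provider.tx_ref().prune_table_with_range::<tables::Receipts>(
        0..=99,
        &mut limiter,
        |_| false,
        |row| last_pruned_tx = row.0,
    )
}
```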
diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs
index 38453385ee..5a43afeb50 100644
--- a/crates/prune/prune/src/lib.rs
+++ b/crates/prune/prune/src/lib.rs
@@ -10,6 +10,7 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 
 mod builder;
+mod db_ext;
 mod error;
 mod event;
 mod metrics;
@@ -20,7 +21,7 @@ use crate::metrics::Metrics;
 pub use builder::PrunerBuilder;
 pub use error::PrunerError;
 pub use event::PrunerEvent;
-pub use pruner::{Pruner, PrunerResult, PrunerWithResult};
+pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult};
 
 // Re-export prune types
 #[doc(inline)]
diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs
index b27a22cea4..582cc0d629 100644
--- a/crates/prune/prune/src/pruner.rs
+++ b/crates/prune/prune/src/pruner.rs
@@ -5,15 +5,16 @@ use crate::{
     Metrics, PrunerError, PrunerEvent,
 };
 use alloy_primitives::BlockNumber;
-use reth_db_api::database::Database;
 use reth_exex_types::FinishedExExHeight;
-use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader};
+use reth_provider::{
+    DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
+};
 use reth_prune_types::{PruneLimiter, PruneProgress, PruneSegment, PrunerOutput};
 use reth_static_file_types::{find_fixed_range, StaticFileSegment};
 use reth_tokio_util::{EventSender, EventStream};
 use std::{
     fs,
-    path::Path,
+    path::{Path, PathBuf},
     time::{Duration, Instant},
 };
 use tokio::sync::watch;
@@ -27,12 +28,15 @@ pub type PrunerWithResult<DB, S> = (Pruner<DB, S>, PrunerResult);
 
 type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>;
 
+/// Pruner with preset provider factory.
+pub type PrunerWithFactory<PF> = Pruner<<PF as DatabaseProviderFactory>::ProviderRW, PF>;
+
 /// Pruning routine. Main pruning logic happens in [`Pruner::run`].
 #[derive(Debug)]
-pub struct Pruner<DB, PF> {
+pub struct Pruner<Provider, PF> {
     /// Provider factory. If pruner is initialized without it, it will be set to `()`.
     provider_factory: PF,
-    segments: Vec<Box<dyn Segment<DB>>>,
+    segments: Vec<Box<dyn Segment<Provider>>>,
     /// Minimum pruning interval measured in blocks. All prune segments are checked and, if needed,
     /// pruned, when the chain advances by the specified number of blocks.
     min_block_interval: usize,
@@ -48,20 +52,23 @@ pub struct Pruner<DB, PF> {
     finished_exex_height: watch::Receiver<FinishedExExHeight>,
     /// The number of recent sidecars to keep in the static file provider.
     recent_sidecars_kept_blocks: usize,
+    /// The path to the static file.
+    static_file_path: Option<PathBuf>,
     #[doc(hidden)]
     metrics: Metrics,
     event_sender: EventSender<PrunerEvent>,
 }
 
-impl<DB> Pruner<DB, ()> {
+impl<Provider> Pruner<Provider, ()> {
     /// Creates a new [Pruner] without a provider factory.
     pub fn new(
-        segments: Vec<Box<dyn Segment<DB>>>,
+        segments: Vec<Box<dyn Segment<Provider>>>,
         min_block_interval: usize,
         delete_limit: usize,
         timeout: Option<Duration>,
         finished_exex_height: watch::Receiver<FinishedExExHeight>,
         recent_sidecars_kept_blocks: usize,
+        static_file_path: Option<PathBuf>,
     ) -> Self {
         Self {
             provider_factory: (),
@@ -72,23 +79,28 @@ impl<DB> Pruner<DB, ()> {
             timeout,
             finished_exex_height,
             recent_sidecars_kept_blocks,
+            static_file_path,
             metrics: Metrics::default(),
             event_sender: Default::default(),
         }
     }
 }
 
-impl<DB: Database> Pruner<DB, ProviderFactory<DB>> {
+impl<PF> Pruner<PF::ProviderRW, PF>
+where
+    PF: DatabaseProviderFactory,
+{
     /// Creates a new pruner with the given provider factory.
     #[allow(clippy::too_many_arguments)]
-    pub fn new(
-        provider_factory: ProviderFactory<DB>,
-        segments: Vec<Box<dyn Segment<DB>>>,
+    pub fn new_with_factory(
+        provider_factory: PF,
+        segments: Vec<Box<dyn Segment<PF::ProviderRW>>>,
         min_block_interval: usize,
         delete_limit: usize,
         timeout: Option<Duration>,
         finished_exex_height: watch::Receiver<FinishedExExHeight>,
         recent_sidecars_kept_blocks: usize,
+        static_file_path: Option<PathBuf>,
     ) -> Self {
         Self {
             provider_factory,
@@ -99,21 +111,30 @@
             timeout,
             finished_exex_height,
             recent_sidecars_kept_blocks,
+            static_file_path,
             metrics: Metrics::default(),
             event_sender: Default::default(),
         }
     }
 }
 
-impl<DB: Database, PF> Pruner<DB, PF> {
+impl<Provider, PF> Pruner<Provider, PF>
+where
+    Provider: PruneCheckpointReader + PruneCheckpointWriter,
+{
     /// Listen for events on the pruner.
     pub fn events(&self) -> EventStream<PrunerEvent> {
         self.event_sender.new_listener()
     }
 
-    fn run_with_provider(
+    /// Run the pruner with the given provider. This will only prune data up to the highest
+    /// finished `ExEx` height, if there are no `ExExes`.
+    ///
+    /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
+    /// to prune.
+    pub fn run_with_provider(
         &mut self,
-        provider: &DatabaseProviderRW<DB>,
+        provider: &Provider,
         tip_block_number: BlockNumber,
     ) -> PrunerResult {
         let Some(tip_block_number) =
@@ -176,7 +197,7 @@
     /// Returns [`PrunerStats`], total number of entries pruned, and [`PruneProgress`].
     fn prune_segments(
        &mut self,
-        provider: &DatabaseProviderRW<DB>,
+        provider: &Provider,
         tip_block_number: BlockNumber,
         limiter: &mut PruneLimiter,
     ) -> Result<(PrunerStats, usize, PrunerOutput), PrunerError> {
@@ -310,16 +331,12 @@
     }
 
     /// Prunes ancient sidecars data from the static file provider.
- pub fn prune_ancient_sidecars( - &self, - provider: &DatabaseProviderRW, - tip_block_number: BlockNumber, - ) { + pub fn prune_ancient_sidecars(&self, _provider: &Provider, tip_block_number: BlockNumber) { if self.recent_sidecars_kept_blocks == 0 { return } - let static_file_provider = provider.static_file_provider(); + let Some(ref static_file_path) = self.static_file_path else { return }; let prune_target_block = tip_block_number.saturating_sub(self.recent_sidecars_kept_blocks as u64); @@ -337,8 +354,7 @@ impl Pruner { while range_start > 0 { let range = find_fixed_range(range_start - 1); - let path = - static_file_provider.path().join(StaticFileSegment::Sidecars.filename(&range)); + let path = static_file_path.join(StaticFileSegment::Sidecars.filename(&range)); if path.exists() { delete_static_files(&path); @@ -384,31 +400,17 @@ fn delete_static_files(path: &Path) { } } -impl Pruner { - /// Run the pruner with the given provider. This will only prune data up to the highest finished - /// ExEx height, if there are no ExExes. - /// - /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data - /// to prune. - #[allow(clippy::doc_markdown)] - pub fn run( - &mut self, - provider: &DatabaseProviderRW, - tip_block_number: BlockNumber, - ) -> PrunerResult { - self.run_with_provider(provider, tip_block_number) - } -} - -impl Pruner> { +impl Pruner +where + PF: DatabaseProviderFactory, +{ /// Run the pruner. This will only prune data up to the highest finished ExEx height, if there /// are no ExExes. /// /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data /// to prune. - #[allow(clippy::doc_markdown)] pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { - let provider = self.provider_factory.provider_rw()?; + let provider = self.provider_factory.database_provider_rw()?; let result = self.run_with_provider(&provider, tip_block_number); provider.commit()?; result @@ -419,7 +421,7 @@ impl Pruner> { mod tests { use crate::Pruner; use reth_exex_types::FinishedExExHeight; - use reth_provider::{test_utils::create_test_provider_factory, ProviderFactory}; + use reth_provider::test_utils::create_test_provider_factory; #[test] fn is_pruning_needed() { @@ -428,7 +430,7 @@ mod tests { let (finished_exex_height_tx, finished_exex_height_rx) = tokio::sync::watch::channel(FinishedExExHeight::NoExExs); - let mut pruner = Pruner::<_, ProviderFactory<_>>::new( + let mut pruner = Pruner::new_with_factory( provider_factory, vec![], 5, @@ -436,6 +438,7 @@ mod tests { None, finished_exex_height_rx, 0, + None, ); // No last pruned block number was set before diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index e04c9ac518..80c2f5393b 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -5,10 +5,7 @@ mod user; use crate::PrunerError; use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db_api::database::Database; -use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DatabaseProviderRW, PruneCheckpointWriter, -}; +use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; use reth_prune_types::{ PruneCheckpoint, PruneLimiter, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, }; @@ -31,7 +28,7 @@ pub use user::{ /// 2. If [`Segment::prune`] returned a [Some] in `checkpoint` of [`SegmentOutput`], call /// [`Segment::save_checkpoint`]. /// 3. 
Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`]. -pub trait Segment: Debug + Send + Sync { +pub trait Segment: Debug + Send + Sync { /// Segment of data that's pruned. fn segment(&self) -> PruneSegment; @@ -42,18 +39,17 @@ pub trait Segment: Debug + Send + Sync { fn purpose(&self) -> PrunePurpose; /// Prune data for [`Self::segment`] using the provided input. - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result; + fn prune(&self, provider: &Provider, input: PruneInput) -> Result; /// Save checkpoint for [`Self::segment`] to the database. fn save_checkpoint( &self, - provider: &DatabaseProviderRW, + provider: &Provider, checkpoint: PruneCheckpoint, - ) -> ProviderResult<()> { + ) -> ProviderResult<()> + where + Provider: PruneCheckpointWriter, + { provider.save_prune_checkpoint(self.segment(), checkpoint) } } @@ -78,9 +74,9 @@ impl PruneInput { /// 2. If checkpoint doesn't exist, return 0. /// /// To get the range end: get last tx number for `to_block`. - pub(crate) fn get_next_tx_num_range( + pub(crate) fn get_next_tx_num_range( &self, - provider: &DatabaseProviderRW, + provider: &Provider, ) -> ProviderResult>> { let from_tx_number = self.previous_checkpoint // Checkpoint exists, prune from the next transaction after the highest pruned one diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 67e889a18b..944531fe76 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -5,11 +5,10 @@ //! - [`crate::segments::static_file::Receipts`] is responsible for pruning receipts on an archive //! node after static file producer has finished -use crate::{segments::PruneInput, PrunerError}; -use reth_db::tables; -use reth_db_api::database::Database; +use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; +use reth_db::{tables, transaction::DbTxMut}; use reth_provider::{ - errors::provider::ProviderResult, DatabaseProviderRW, PruneCheckpointWriter, + errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{ @@ -17,10 +16,13 @@ use reth_prune_types::{ }; use tracing::trace; -pub(crate) fn prune( - provider: &DatabaseProviderRW, +pub(crate) fn prune( + provider: &Provider, input: PruneInput, -) -> Result { +) -> Result +where + Provider: DBProvider + TransactionsProvider + BlockReader, +{ let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, None => { @@ -33,7 +35,7 @@ pub(crate) fn prune( let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::( tx_range, &mut limiter, |_| false, @@ -60,8 +62,8 @@ pub(crate) fn prune( }) } -pub(crate) fn save_checkpoint( - provider: &DatabaseProviderRW, +pub(crate) fn save_checkpoint( + provider: impl PruneCheckpointWriter, checkpoint: PruneCheckpoint, ) -> ProviderResult<()> { provider.save_prune_checkpoint(PruneSegment::Receipts, checkpoint)?; @@ -83,7 +85,7 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_provider::PruneCheckpointReader; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, }; @@ -158,7 +160,7 @@ mod tests { ) .sub(1); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = super::prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 29cfaeac68..af7f479bbc 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -2,32 +2,35 @@ use crate::segments::{ AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StaticFileSidecars, StorageHistory, TransactionLookup, UserReceipts, }; -use reth_db_api::database::Database; -use reth_provider::providers::StaticFileProvider; +use reth_db::transaction::DbTxMut; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, + TransactionsProvider, +}; use reth_prune_types::PruneModes; use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; /// Collection of [Segment]. Thread-safe, allocated on the heap. #[derive(Debug)] -pub struct SegmentSet { - inner: Vec>>, +pub struct SegmentSet { + inner: Vec>>, } -impl SegmentSet { +impl SegmentSet { /// Returns empty [`SegmentSet`] collection. pub fn new() -> Self { Self::default() } /// Adds new [Segment] to collection. - pub fn segment + 'static>(mut self, segment: S) -> Self { + pub fn segment + 'static>(mut self, segment: S) -> Self { self.inner.push(Box::new(segment)); self } /// Adds new [Segment] to collection if it's [Some]. - pub fn segment_opt + 'static>(self, segment: Option) -> Self { + pub fn segment_opt + 'static>(self, segment: Option) -> Self { if let Some(segment) = segment { return self.segment(segment) } @@ -35,10 +38,15 @@ impl SegmentSet { } /// Consumes [`SegmentSet`] and returns a [Vec]. - pub fn into_vec(self) -> Vec>> { + pub fn into_vec(self) -> Vec>> { self.inner } +} +impl SegmentSet +where + Provider: DBProvider + TransactionsProvider + PruneCheckpointWriter + BlockReader, +{ /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. 
pub fn from_components( @@ -81,7 +89,7 @@ impl SegmentSet { } } -impl Default for SegmentSet { +impl Default for SegmentSet { fn default() -> Self { Self { inner: Vec::new() } } diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 450dae3a80..a3daf504e6 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -1,6 +1,7 @@ use std::num::NonZeroUsize; use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, PrunerError, }; @@ -8,11 +9,10 @@ use alloy_primitives::BlockNumber; use itertools::Itertools; use reth_db::{ cursor::{DbCursorRO, RangeWalker}, - database::Database, tables, transaction::DbTxMut, }; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderRW}; +use reth_provider::{providers::StaticFileProvider, DBProvider}; use reth_prune_types::{ PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -34,7 +34,7 @@ impl Headers { } } -impl Segment for Headers { +impl> Segment for Headers { fn segment(&self) -> PruneSegment { PruneSegment::Headers } @@ -49,11 +49,7 @@ impl Segment for Headers { PrunePurpose::StaticFile } - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let (block_range_start, block_range_end) = match input.get_next_block_range() { Some(range) => (*range.start(), *range.end()), None => { @@ -106,18 +102,19 @@ impl Segment for Headers { }) } } -type Walker<'a, DB, T> = RangeWalker<'a, T, <::TXMut as DbTxMut>::CursorMut>; +type Walker<'a, Provider, T> = + RangeWalker<'a, T, <::Tx as DbTxMut>::CursorMut>; #[allow(missing_debug_implementations)] -struct HeaderTablesIter<'a, DB> +struct HeaderTablesIter<'a, Provider> where - DB: Database, + Provider: DBProvider, { - provider: &'a DatabaseProviderRW, + provider: &'a Provider, limiter: &'a mut PruneLimiter, - headers_walker: Walker<'a, DB, tables::Headers>, - header_tds_walker: Walker<'a, DB, tables::HeaderTerminalDifficulties>, - canonical_headers_walker: Walker<'a, DB, tables::CanonicalHeaders>, + headers_walker: Walker<'a, Provider, tables::Headers>, + header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, + canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, } struct HeaderTablesIterItem { @@ -125,24 +122,24 @@ struct HeaderTablesIterItem { entries_pruned: usize, } -impl<'a, DB> HeaderTablesIter<'a, DB> +impl<'a, Provider> HeaderTablesIter<'a, Provider> where - DB: Database, + Provider: DBProvider, { fn new( - provider: &'a DatabaseProviderRW, + provider: &'a Provider, limiter: &'a mut PruneLimiter, - headers_walker: Walker<'a, DB, tables::Headers>, - header_tds_walker: Walker<'a, DB, tables::HeaderTerminalDifficulties>, - canonical_headers_walker: Walker<'a, DB, tables::CanonicalHeaders>, + headers_walker: Walker<'a, Provider, tables::Headers>, + header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, + canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, ) -> Self { Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker } } } -impl<'a, DB> Iterator for HeaderTablesIter<'a, DB> +impl<'a, Provider> Iterator for HeaderTablesIter<'a, Provider> where - DB: Database, + Provider: DBProvider, { type Item = Result; fn next(&mut self) -> Option { @@ -154,7 +151,7 @@ where let mut 
pruned_block_td = None; let mut pruned_block_canonical = None; - if let Err(err) = self.provider.prune_table_with_range_step( + if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( &mut self.headers_walker, self.limiter, &mut |_| false, @@ -163,7 +160,7 @@ where return Some(Err(err.into())) } - if let Err(err) = self.provider.prune_table_with_range_step( + if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( &mut self.header_tds_walker, self.limiter, &mut |_| false, @@ -172,7 +169,7 @@ where return Some(Err(err.into())) } - if let Err(err) = self.provider.prune_table_with_range_step( + if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( &mut self.canonical_headers_walker, self.limiter, &mut |_| false, @@ -202,7 +199,10 @@ mod tests { use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::transaction::DbTx; - use reth_provider::{PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory}; + use reth_provider::{ + DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, + StaticFileProviderFactory, + }; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, SegmentOutputCheckpoint, @@ -254,7 +254,7 @@ mod tests { .map(|block_number| block_number + 1) .unwrap_or_default(); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input.clone()).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); trace!(target: "pruner::test", @@ -325,7 +325,7 @@ mod tests { limiter, }; - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let segment = super::Headers::new(db.factory.static_file_provider()); let result = segment.prune(&provider, input).unwrap(); assert_eq!( diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index e84f7df443..f766f7ea1d 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,9 +2,10 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db_api::database::Database; +use reth_db::transaction::DbTxMut; use reth_provider::{ - errors::provider::ProviderResult, providers::StaticFileProvider, DatabaseProviderRW, + errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use reth_static_file_types::StaticFileSegment; @@ -20,7 +21,10 @@ impl Receipts { } } -impl Segment for Receipts { +impl Segment for Receipts +where + Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::Receipts } @@ -35,17 +39,13 @@ impl Segment for Receipts { PrunePurpose::StaticFile } - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } fn save_checkpoint( &self, - provider: &DatabaseProviderRW, + provider: &Provider, checkpoint: PruneCheckpoint, ) -> ProviderResult<()> { crate::segments::receipts::save_checkpoint(provider, checkpoint) diff --git a/crates/prune/prune/src/segments/static_file/sidecars.rs 
b/crates/prune/prune/src/segments/static_file/sidecars.rs index db32dc0514..4f45a3963d 100644 --- a/crates/prune/prune/src/segments/static_file/sidecars.rs +++ b/crates/prune/prune/src/segments/static_file/sidecars.rs @@ -1,15 +1,15 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::tables; -use reth_db_api::database::Database; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderRW}; +use reth_db::{tables, transaction::DbTxMut}; +use reth_provider::{providers::StaticFileProvider, BlockReader, DBProvider, TransactionsProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use reth_static_file_types::StaticFileSegment; -use tracing::{instrument, trace}; +use tracing::trace; #[derive(Debug)] pub struct Sidecars { @@ -22,7 +22,10 @@ impl Sidecars { } } -impl Segment for Sidecars { +impl Segment for Sidecars +where + Provider: DBProvider + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::Sidecars } @@ -37,12 +40,7 @@ impl Segment for Sidecars { PrunePurpose::StaticFile } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let (block_range_start, block_range_end) = match input.get_next_block_range() { Some(range) => (*range.start(), *range.end()), None => { @@ -57,7 +55,7 @@ impl Segment for Sidecars { let mut limiter = input.limiter; let mut last_pruned_block: Option = None; - let (pruned, done) = provider.prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::( range, &mut limiter, |_| false, @@ -85,7 +83,9 @@ mod tests { use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::tables; - use reth_provider::{PruneCheckpointReader, StaticFileProviderFactory}; + use reth_provider::{ + DatabaseProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, + }; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, }; @@ -137,7 +137,7 @@ mod tests { .map(|block_number| block_number + 1) .unwrap_or_default(); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input.clone()).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); trace!(target: "pruner::test", diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 20199ba409..0daf7e2547 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -1,10 +1,10 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::tables; -use reth_db_api::database::Database; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderRW, TransactionsProvider}; +use reth_db::{tables, transaction::DbTxMut}; +use reth_provider::{providers::StaticFileProvider, BlockReader, DBProvider, TransactionsProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; @@ -22,7 +22,10 @@ impl Transactions { } } -impl Segment for Transactions { +impl Segment for Transactions +where + 
Provider: DBProvider + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::Transactions } @@ -37,11 +40,7 @@ impl Segment for Transactions { PrunePurpose::StaticFile } - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, None => { @@ -53,7 +52,7 @@ impl Segment for Transactions { let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::( tx_range, &mut limiter, |_| false, @@ -91,7 +90,10 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_provider::{PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory}; + use reth_provider::{ + DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, + StaticFileProviderFactory, + }; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, SegmentOutput, @@ -141,7 +143,7 @@ mod tests { .map(|tx_number| tx_number + 1) .unwrap_or_default(); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input.clone()).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 4e5a99bc3d..016d9a22fb 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -1,11 +1,12 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{user::history::prune_history_indices, PruneInput, Segment}, PrunerError, }; use itertools::Itertools; -use reth_db::tables; -use reth_db_api::{database::Database, models::ShardedKey}; -use reth_provider::DatabaseProviderRW; +use reth_db::{tables, transaction::DbTxMut}; +use reth_db_api::models::ShardedKey; +use reth_provider::DBProvider; use reth_prune_types::{ PruneInterruptReason, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -30,7 +31,10 @@ impl AccountHistory { } } -impl Segment for AccountHistory { +impl Segment for AccountHistory +where + Provider: DBProvider, +{ fn segment(&self) -> PruneSegment { PruneSegment::AccountHistory } @@ -44,11 +48,7 @@ impl Segment for AccountHistory { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, None => { @@ -80,8 +80,8 @@ impl Segment for AccountHistory { // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is // additionally limited by the `max_reorg_depth`, so no OOM is expected here. 
let mut highest_deleted_accounts = FxHashMap::default(); - let (pruned_changesets, done) = provider - .prune_table_with_range::( + let (pruned_changesets, done) = + provider.tx_ref().prune_table_with_range::( range, &mut limiter, |_| false, @@ -106,7 +106,7 @@ impl Segment for AccountHistory { .map(|(address, block_number)| { ShardedKey::new(address, block_number.min(last_changeset_pruned_block)) }); - let outcomes = prune_history_indices::( + let outcomes = prune_history_indices::( provider, highest_sharded_keys, |a, b| a.key == b.key, @@ -135,7 +135,7 @@ mod tests { use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_provider::PruneCheckpointReader; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, }; @@ -203,7 +203,7 @@ mod tests { }; let segment = AccountHistory::new(prune_mode); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/prune/prune/src/segments/user/history.rs b/crates/prune/prune/src/segments/user/history.rs index 3551deeed5..e27884a927 100644 --- a/crates/prune/prune/src/segments/user/history.rs +++ b/crates/prune/prune/src/segments/user/history.rs @@ -2,13 +2,12 @@ use alloy_primitives::BlockNumber; use reth_db::{BlockNumberList, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, models::ShardedKey, table::Table, transaction::DbTxMut, DatabaseError, }; -use reth_provider::DatabaseProviderRW; +use reth_provider::DBProvider; enum PruneShardOutcome { Deleted, @@ -26,13 +25,13 @@ pub(crate) struct PrunedIndices { /// Prune history indices according to the provided list of highest sharded keys. /// /// Returns total number of deleted, updated and unchanged entities. 
-pub(crate) fn prune_history_indices( - provider: &DatabaseProviderRW, +pub(crate) fn prune_history_indices( + provider: &Provider, highest_sharded_keys: impl IntoIterator, key_matches: impl Fn(&T::Key, &T::Key) -> bool, ) -> Result where - DB: Database, + Provider: DBProvider, T: Table, T::Key: AsRef>, { diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 7185affd0f..5bc9feaf02 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,8 +2,11 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db_api::database::Database; -use reth_provider::{errors::provider::ProviderResult, DatabaseProviderRW}; +use reth_db::transaction::DbTxMut; +use reth_provider::{ + errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, + TransactionsProvider, +}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -18,7 +21,10 @@ impl Receipts { } } -impl Segment for Receipts { +impl Segment for Receipts +where + Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::Receipts } @@ -32,17 +38,13 @@ impl Segment for Receipts { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } fn save_checkpoint( &self, - provider: &DatabaseProviderRW, + provider: &Provider, checkpoint: PruneCheckpoint, ) -> ProviderResult<()> { crate::segments::receipts::save_checkpoint(provider, checkpoint) diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 10a385bda5..fbb353b41a 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -1,10 +1,10 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::tables; -use reth_db_api::database::Database; -use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter, TransactionsProvider}; +use reth_db::{tables, transaction::DbTxMut}; +use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, MINIMUM_PRUNING_DISTANCE, @@ -22,7 +22,10 @@ impl ReceiptsByLogs { } } -impl Segment for ReceiptsByLogs { +impl Segment for ReceiptsByLogs +where + Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs } @@ -36,11 +39,7 @@ impl Segment for ReceiptsByLogs { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of // `MINIMUM_PRUNING_DISTANCE`. 
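The inclusion-list check that the skip filter in the next hunk performs can be summarized as: keep a receipt whenever one of its logs was emitted by a tracked contract address. A hedged sketch of that predicate in isolation (helper name and signature are illustrative, not the PR's code):

```rust
// Illustrative predicate: a receipt survives pruning if any of its logs
// came from an address in the contract inclusion list.
use alloy_primitives::Address;
use reth_primitives::Receipt;

fn keep_receipt(receipt: &Receipt, inclusion_list: &[Address]) -> bool {
    receipt.logs.iter().any(|log| inclusion_list.contains(&log.address))
}
```

Everything not kept by this predicate is treated as if it had a `PruneMode::Distance` of `MINIMUM_PRUNING_DISTANCE`, per the comment above.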
@@ -143,7 +142,7 @@ impl Segment for ReceiptsByLogs { // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.prune_table_with_range::( + (deleted, done) = provider.tx_ref().prune_table_with_range::( tx_range, &mut limiter, |(tx_num, receipt)| { @@ -224,7 +223,7 @@ mod tests { use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; - use reth_provider::{PruneCheckpointReader, TransactionsProvider}; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider}; use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ @@ -286,7 +285,7 @@ mod tests { ); let run_prune = || { - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let prune_before_block: usize = 20; let prune_mode = PruneMode::Before(prune_before_block as u64); diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 76058608e3..de3fd686fa 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -1,10 +1,10 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::tables; -use reth_db_api::database::Database; -use reth_provider::{DatabaseProviderRW, TransactionsProvider}; +use reth_db::{tables, transaction::DbTxMut}; +use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; @@ -21,7 +21,10 @@ impl SenderRecovery { } } -impl Segment for SenderRecovery { +impl Segment for SenderRecovery +where + Provider: DBProvider + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::SenderRecovery } @@ -35,11 +38,7 @@ impl Segment for SenderRecovery { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, None => { @@ -52,12 +51,13 @@ impl Segment for SenderRecovery { let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.prune_table_with_range::( - tx_range, - &mut limiter, - |_| false, - |row| last_pruned_transaction = row.0, - )?; + let (pruned, done) = + provider.tx_ref().prune_table_with_range::( + tx_range, + &mut limiter, + |_| false, + |row| last_pruned_transaction = row.0, + )?; trace!(target: "pruner", %pruned, %done, "Pruned transaction senders"); let last_pruned_block = provider @@ -90,7 +90,7 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_provider::PruneCheckpointReader; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; @@ -179,7 +179,7 @@ mod tests { .into_inner() .0; - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index f28ea6ccfd..5291d822ce 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -1,14 +1,12 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{user::history::prune_history_indices, PruneInput, Segment, SegmentOutput}, PrunerError, }; use itertools::Itertools; -use reth_db::tables; -use reth_db_api::{ - database::Database, - models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress}, -}; -use reth_provider::DatabaseProviderRW; +use reth_db::{tables, transaction::DbTxMut}; +use reth_db_api::models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress}; +use reth_provider::DBProvider; use reth_prune_types::{ PruneInterruptReason, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, @@ -33,7 +31,10 @@ impl StorageHistory { } } -impl Segment for StorageHistory { +impl Segment for StorageHistory +where + Provider: DBProvider, +{ fn segment(&self) -> PruneSegment { PruneSegment::StorageHistory } @@ -47,11 +48,7 @@ impl Segment for StorageHistory { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, None => { @@ -83,8 +80,8 @@ impl Segment for StorageHistory { // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is // additionally limited by the `max_reorg_depth`, so no OOM is expected here. 
let mut highest_deleted_storages = FxHashMap::default(); - let (pruned_changesets, done) = provider - .prune_table_with_range::( + let (pruned_changesets, done) = + provider.tx_ref().prune_table_with_range::( BlockNumberAddress::range(range), &mut limiter, |_| false, @@ -114,7 +111,7 @@ impl Segment for StorageHistory { block_number.min(last_changeset_pruned_block), ) }); - let outcomes = prune_history_indices::( + let outcomes = prune_history_indices::( provider, highest_sharded_keys, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, @@ -143,7 +140,7 @@ mod tests { use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_provider::PruneCheckpointReader; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ @@ -210,7 +207,7 @@ mod tests { }; let segment = StorageHistory::new(prune_mode); - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index ef1fc0ae8e..5b9b7454ff 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -1,11 +1,11 @@ use crate::{ + db_ext::DbTxPruneExt, segments::{PruneInput, Segment, SegmentOutput}, PrunerError, }; use rayon::prelude::*; -use reth_db::tables; -use reth_db_api::database::Database; -use reth_provider::{DatabaseProviderRW, TransactionsProvider}; +use reth_db::{tables, transaction::DbTxMut}; +use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, }; @@ -22,7 +22,10 @@ impl TransactionLookup { } } -impl Segment for TransactionLookup { +impl Segment for TransactionLookup +where + Provider: DBProvider + TransactionsProvider + BlockReader, +{ fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup } @@ -36,11 +39,7 @@ impl Segment for TransactionLookup { } #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune( - &self, - provider: &DatabaseProviderRW, - input: PruneInput, - ) -> Result { + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let (start, end) = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, None => { @@ -73,13 +72,15 @@ impl Segment for TransactionLookup { let mut limiter = input.limiter; let mut last_pruned_transaction = None; - let (pruned, done) = provider.prune_table_with_iterator::( - hashes, - &mut limiter, - |row| { - last_pruned_transaction = Some(last_pruned_transaction.unwrap_or(row.1).max(row.1)) - }, - )?; + let (pruned, done) = + provider.tx_ref().prune_table_with_iterator::( + hashes, + &mut limiter, + |row| { + last_pruned_transaction = + Some(last_pruned_transaction.unwrap_or(row.1).max(row.1)) + }, + )?; let done = done && tx_range_end == end; trace!(target: "pruner", %pruned, %done, "Pruned transaction lookup"); @@ -117,7 +118,7 @@ mod tests { Itertools, }; use reth_db::tables; - use reth_provider::PruneCheckpointReader; + use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, }; @@ -204,7 +205,7 @@ mod tests { .into_inner() .0; - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); limiter.increment_deleted_entries_count_by(result.pruned); diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index ad71f38b44..4502732a42 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -4,14 +4,12 @@ use crate::{ precompile::{Address, HashSet}, primitives::alloy_primitives::BlockNumber, }; +use alloc::vec::Vec; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{Receipt, Receipts, Request, Requests}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Takes care of: /// - recording receipts during execution of multiple blocks. /// - pruning receipts according to the pruning configuration. @@ -183,14 +181,9 @@ impl BlockBatchRecord { #[cfg(test)] mod tests { use super::*; + use alloc::collections::BTreeMap; use reth_primitives::{Address, Log, Receipt}; use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; - #[cfg(feature = "std")] - use std::collections::BTreeMap; - #[cfg(not(feature = "std"))] - extern crate alloc; - #[cfg(not(feature = "std"))] - use alloc::collections::BTreeMap; #[test] fn test_save_receipts_empty() { diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 5edd76bea4..fb5f71045e 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -8,26 +8,26 @@ use revm::{ Database, }; -/// A helper trait responsible for providing that necessary state for the EVM execution. +/// A helper trait responsible for providing state necessary for EVM execution. /// -/// This servers as the data layer for [Database]. +/// This serves as the data layer for [`Database`]. pub trait EvmStateProvider: Send + Sync { /// Get basic account information. /// - /// Returns `None` if the account doesn't exist. + /// Returns [`None`] if the account doesn't exist. fn basic_account(&self, address: Address) -> ProviderResult>; - /// Get the hash of the block with the given number. Returns `None` if no block with this number - /// exists. + /// Get the hash of the block with the given number. Returns [`None`] if no block with this + /// number exists. 
fn block_hash(&self, number: BlockNumber) -> ProviderResult>; - /// Get account code by its hash + /// Get account code by hash. fn bytecode_by_hash( &self, code_hash: B256, ) -> ProviderResult>; - /// Get storage of given account. + /// Get storage of the given account. fn storage( &self, account: Address, diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 4fb6c30d1c..5515357d0d 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; /// Contains glue code for integrating reth database into revm's [Database]. diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 92f09e461d..fe377bc5fc 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,4 +1,7 @@ +use std::collections::HashSet; + use crate::precompile::HashMap; +use alloc::vec::Vec; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; @@ -8,13 +11,9 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct StateProviderTest { @@ -76,12 +75,7 @@ impl StateRootProvider for StateProviderTest { unimplemented!("state root computation is not supported") } - fn state_root_from_nodes( - &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { unimplemented!("state root computation is not supported") } @@ -94,9 +88,7 @@ impl StateRootProvider for StateProviderTest { fn state_root_from_nodes_with_updates( &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, + _input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { unimplemented!("state root computation is not supported") } @@ -115,16 +107,24 @@ impl StorageRootProvider for StateProviderTest { impl StateProofProvider for StateProviderTest { fn proof( &self, - _hashed_state: HashedPostState, + _input: TrieInput, _address: Address, _slots: &[B256], ) -> ProviderResult { unimplemented!("proof generation is not supported") } + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + unimplemented!("proof generation is not supported") + } + fn witness( &self, - _overlay: HashedPostState, + _input: TrieInput, _target: HashedPostState, ) -> ProviderResult> { unimplemented!("witness generation is not supported") diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 3bfb2e9a76..bd51ac9975 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -20,7 +20,9 @@ reth-engine-primitives.workspace = true reth-network-peers.workspace = true # ethereum +alloy-eips.workspace = true alloy-json-rpc.workspace = true +alloy-primitives.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/anvil.rs b/crates/rpc/rpc-api/src/anvil.rs index 56416d0941..e916fa8c23 100644 --- a/crates/rpc/rpc-api/src/anvil.rs +++ 
b/crates/rpc/rpc-api/src/anvil.rs @@ -1,6 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, Bytes, B256, U256}; +use alloy_primitives::{Address, Bytes, B256, U256}; use reth_rpc_types::{ anvil::{Forking, Metadata, MineOptions, NodeInfo}, Block, diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index ab755b2583..671ec82a02 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,13 +1,14 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{Address, Bytes, B256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256}; use reth_rpc_types::{ + debug::ExecutionWitness, trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }, Block, Bundle, StateContext, TransactionRequest, }; -use std::collections::HashMap; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] @@ -106,7 +107,7 @@ pub trait DebugApi { async fn debug_trace_call( &self, request: TransactionRequest, - block_number: Option, + block_id: Option, opts: Option, ) -> RpcResult; @@ -138,12 +139,14 @@ pub trait DebugApi { /// to their preimages that were required during the execution of the block, including during /// state root recomputation. /// - /// The first and only argument is the block number or block hash. + /// The first argument is the block number or block hash. The second argument is a boolean + /// indicating whether to include the preimages of keys in the response. #[method(name = "executionWitness")] async fn debug_execution_witness( &self, block: BlockNumberOrTag, - ) -> RpcResult>; + include_preimages: bool, + ) -> RpcResult; /// Sets the logging backtrace location. When a backtrace location is set and a log message is /// emitted at that location, the stack of the goroutine executing the log statement will diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 8c855aa34c..750c88cdc2 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,10 +3,11 @@ //! This contains the `engine_` namespace and the subset of the `eth_` namespace that is exposed to //! the consensus client. +use alloy_eips::{eip4844::BlobAndProofV1, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; +use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_engine_primitives::EngineTypes; -use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, @@ -213,6 +214,13 @@ pub trait EngineApi { /// See also #[method(name = "exchangeCapabilities")] async fn exchange_capabilities(&self, capabilities: Vec) -> RpcResult>; + + /// Fetch blobs for the consensus layer from the in-memory blob cache. + #[method(name = "getBlobsV1")] + async fn get_blobs_v1( + &self, + transaction_ids: Vec, + ) -> RpcResult>>; } /// A subset of the ETH rpc interface: @@ -238,14 +246,14 @@ pub trait EngineEthApi { async fn call( &self, request: TransactionRequest, - block_number: Option, + block_id: Option, state_overrides: Option, block_overrides: Option>, ) -> RpcResult; /// Returns code at a given address at given block number. 
#[method(name = "getCode")] - async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; + async fn get_code(&self, address: Address, block_id: Option) -> RpcResult; /// Returns information about a block by hash. #[method(name = "getBlockByHash")] diff --git a/crates/rpc/rpc-api/src/ganache.rs b/crates/rpc/rpc-api/src/ganache.rs index 338c914980..99a50c0fae 100644 --- a/crates/rpc/rpc-api/src/ganache.rs +++ b/crates/rpc/rpc-api/src/ganache.rs @@ -1,5 +1,5 @@ +use alloy_primitives::U256; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::U256; use reth_rpc_types::anvil::MineOptions; /// Ganache rpc interface. diff --git a/crates/rpc/rpc-api/src/hardhat.rs b/crates/rpc/rpc-api/src/hardhat.rs index 1620bdb596..c34991fd3a 100644 --- a/crates/rpc/rpc-api/src/hardhat.rs +++ b/crates/rpc/rpc-api/src/hardhat.rs @@ -1,5 +1,5 @@ +use alloy_primitives::{Address, Bytes, B256, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, Bytes, B256, U256}; use reth_rpc_types::anvil::{Forking, Metadata}; /// Hardhat rpc interface. diff --git a/crates/rpc/rpc-api/src/net.rs b/crates/rpc/rpc-api/src/net.rs index 98aa65bd09..5535f123c1 100644 --- a/crates/rpc/rpc-api/src/net.rs +++ b/crates/rpc/rpc-api/src/net.rs @@ -1,5 +1,5 @@ +use alloy_primitives::U64; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::U64; /// Net rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "net"))] diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index c232381612..c025087ff5 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{Address, Bytes, TxHash, B256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, Bytes, TxHash, B256}; +use reth_primitives::BlockId; use reth_rpc_types::{ trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, @@ -24,7 +25,7 @@ pub trait Otterscan { /// Check if a certain address contains a deployed code. #[method(name = "hasCode")] - async fn has_code(&self, address: Address, block_number: Option) -> RpcResult; + async fn has_code(&self, address: Address, block_id: Option) -> RpcResult; /// Very simple API versioning scheme. Every time we add a new capability, the number is /// incremented. 
This allows for Otterscan to check if the node contains all API it diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 1e9c4314ab..98c31b78f9 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, U256}; +use reth_primitives::BlockId; use std::collections::HashMap; /// Reth API namespace for reth-specific methods diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index aa2a5693a8..13518c73d1 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{Bytes, B256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, Bytes, B256}; +use reth_primitives::BlockId; use reth_rpc_types::{ state::StateOverride, trace::{ diff --git a/crates/rpc/rpc-api/src/txpool.rs b/crates/rpc/rpc-api/src/txpool.rs index 844bcceaab..34591aa6d4 100644 --- a/crates/rpc/rpc-api/src/txpool.rs +++ b/crates/rpc/rpc-api/src/txpool.rs @@ -1,5 +1,5 @@ +use alloy_primitives::Address; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::Address; use reth_rpc_types::{ txpool::{TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolStatus}, Transaction, WithOtherFields, diff --git a/crates/rpc/rpc-api/src/web3.rs b/crates/rpc/rpc-api/src/web3.rs index cf3887cbc8..c4017e03f2 100644 --- a/crates/rpc/rpc-api/src/web3.rs +++ b/crates/rpc/rpc-api/src/web3.rs @@ -1,5 +1,5 @@ +use alloy_primitives::{Bytes, B256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Bytes, B256}; /// Web3 rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "web3"))] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index f4b8e4f64b..af7096ac86 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -29,6 +29,10 @@ reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-rpc-types.workspace = true +reth-primitives.workspace = true + +# ethereum +alloy-network.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -54,7 +58,6 @@ reth-network-peers.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } -reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true @@ -65,6 +68,8 @@ reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-node-api.workspace = true +alloy-primitives.workspace = true + tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true clap = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 1cac81f4c8..4e86b2c81f 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -95,6 +95,7 @@ impl RethRpcServerConfig for RpcServerArgs { .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize) .eth_proof_window(self.rpc_eth_proof_window) .rpc_gas_cap(self.rpc_gas_cap) + .rpc_max_simulate_blocks(self.rpc_max_simulate_blocks) 
.state_cache(self.state_cache_config()) .gpo_config(self.gas_price_oracle_config()) .proof_permits(self.rpc_proof_permits) diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index af0097c23b..50f6ed8a00 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,4 +1,5 @@ use reth_evm::ConfigureEvm; +use reth_primitives::Header; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_types::{ @@ -75,7 +76,7 @@ impl where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>, Network: Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 6c454bff6b..9e450356b0 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -18,6 +18,7 @@ //! ``` //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -37,7 +38,7 @@ //! Pool: TransactionPool + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm, +//! EvmConfig: ConfigureEvm<Header = Header>, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -69,6 +70,7 @@ //! use reth_engine_primitives::EngineTypes; //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -94,7 +96,7 @@ //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer<EngineT>, //! EngineT: EngineTypes, -//! EvmConfig: ConfigureEvm, +//! EvmConfig: ConfigureEvm<Header = Header>, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -154,6 +156,7 @@ use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; +use reth_primitives::Header; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, StateProviderFactory, @@ -165,7 +168,7 @@ use reth_rpc::{ use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, - EthApiServer, FullEthApiServer, + EthApiServer, EthApiTypes, FullEthApiServer, RpcReceipt, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; @@ -207,6 +210,7 @@ pub use eth::EthHandlers; // Rpc server metrics mod metrics; pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; +use reth_node_core::rpc::types::AnyTransactionReceipt; /// Convenience function for starting a server in one step. #[allow(clippy::too_many_arguments)] @@ -227,8 +231,10 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm, - EthApi: FullEthApiServer, + EvmConfig: ConfigureEvm<Header = Header>, + EthApi: FullEthApiServer< + NetworkTypes: alloy_network::Network<ReceiptResponse = AnyTransactionReceipt>, + >, { let module_config = module_config.into(); server_config @@ -413,7 +419,7 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -435,7 +441,9 @@ where EngineT: EngineTypes, EngineApi: EngineApiServer<EngineT>, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + NetworkTypes: alloy_network::Network<ReceiptResponse = AnyTransactionReceipt>, + >, { let Self { provider, pool, network, executor, events, evm_config } = self; @@ -462,13 +470,14 @@ where /// ```no_run /// use reth_evm::ConfigureEvm; /// use reth_network_api::noop::NoopNetwork; + /// use reth_primitives::Header; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; /// - /// fn init<Evm: ConfigureEvm + 'static>(evm: Evm) { + /// fn init<Evm: ConfigureEvm<Header = Header> + 'static>(evm: Evm) { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -501,7 +510,9 @@ where eth: DynEthApiBuilder, ) -> TransportRpcModules<()> where - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + NetworkTypes: alloy_network::Network<ReceiptResponse = AnyTransactionReceipt>, + >, { let mut modules = TransportRpcModules::default(); @@ -654,7 +665,7 @@ where >, ) -> Self where - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm<Header = Header>
, { let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); @@ -791,7 +802,13 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_eth(&mut self) -> &mut Self where - EthApi: EthApiServer, + EthApi: EthApiServer< + reth_rpc_types::Transaction, + reth_rpc_types::Block, + RpcReceipt, + > + EthApiTypes< + NetworkTypes: alloy_network::Network, + >, { let eth_api = self.eth_api().clone(); self.modules.insert(RethRpcModule::Eth, eth_api.into_rpc().into()); @@ -808,7 +825,11 @@ where EthApi: EthApiServer< WithOtherFields, reth_rpc_types::Block>, - > + TraceExt, + RpcReceipt, + > + EthApiTypes< + NetworkTypes: alloy_network::Network, + > + TraceExt + + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -911,7 +932,11 @@ where EthApi: EthApiServer< WithOtherFields, reth_rpc_types::Block>, - > + TraceExt, + RpcReceipt, + > + EthApiTypes< + NetworkTypes: alloy_network::Network, + > + TraceExt + + EthTransactions, { let eth_api = self.eth_api().clone(); OtterscanApi::new(eth_api) @@ -957,7 +982,9 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + NetworkTypes: alloy_network::Network, + >, { /// Configures the auth module that includes the /// * `engine_` namespace diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 631ea81357..79a42d121c 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -1,9 +1,10 @@ //! Auth server tests use crate::utils::launch_auth; +use alloy_primitives::U64; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::{Block, U64}; +use reth_primitives::Block; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 8e9c7ce608..500631427c 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,6 +2,7 @@ //! 
Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; +use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; use jsonrpsee::{ core::{ client::{ClientT, SubscriptionClientT}, @@ -12,9 +13,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{ - hex_literal::hex, Address, BlockId, BlockNumberOrTag, Bytes, TxHash, B256, B64, U256, U64, -}; +use reth_primitives::{BlockId, BlockNumberOrTag, Receipt}; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, @@ -173,68 +172,93 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version(client).await.unwrap(); - EthApiClient::::chain_id(client).await.unwrap(); - EthApiClient::::accounts(client).await.unwrap(); - EthApiClient::::get_account(client, address, block_number.into()) + EthApiClient::::protocol_version(client).await.unwrap(); + EthApiClient::::chain_id(client).await.unwrap(); + EthApiClient::::accounts(client).await.unwrap(); + EthApiClient::::get_account(client, address, block_number.into()) .await .unwrap(); - EthApiClient::::block_number(client).await.unwrap(); - EthApiClient::::get_code(client, address, None).await.unwrap(); - EthApiClient::::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::::fee_history(client, U64::from(0), block_number, None) + EthApiClient::::block_number(client).await.unwrap(); + EthApiClient::::get_code(client, address, None).await.unwrap(); + EthApiClient::::send_raw_transaction(client, tx).await.unwrap(); + EthApiClient::::fee_history( + client, + U64::from(0), + block_number, + None, + ) + .await + .unwrap(); + EthApiClient::::balance(client, address, None).await.unwrap(); + EthApiClient::::transaction_count(client, address, None) .await .unwrap(); - EthApiClient::::balance(client, address, None).await.unwrap(); - EthApiClient::::transaction_count(client, address, None).await.unwrap(); - EthApiClient::::storage_at(client, address, U256::default().into(), None) + EthApiClient::::storage_at( + client, + address, + U256::default().into(), + None, + ) + .await + .unwrap(); + EthApiClient::::block_by_hash(client, hash, false).await.unwrap(); + EthApiClient::::block_by_number(client, block_number, false) .await .unwrap(); - EthApiClient::::block_by_hash(client, hash, false).await.unwrap(); - EthApiClient::::block_by_number(client, block_number, false).await.unwrap(); - EthApiClient::::block_transaction_count_by_number(client, block_number) + EthApiClient::::block_transaction_count_by_number( + client, + block_number, + ) + .await + .unwrap(); + EthApiClient::::block_transaction_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash(client, hash) + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash).await.unwrap(); - EthApiClient::::block_uncles_count_by_number(client, block_number) + EthApiClient::::block_uncles_count_by_number(client, block_number) .await .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index(client, hash, index) + EthApiClient::::uncle_by_block_hash_and_index(client, hash, index) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::uncle_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::sign(client, address, bytes.clone()).await.unwrap_err(); - 
EthApiClient::::sign_typed_data(client, address, typed_data) + EthApiClient::::sign(client, address, bytes.clone()) .await .unwrap_err(); - EthApiClient::::transaction_by_hash(client, tx_hash).await.unwrap(); - EthApiClient::::transaction_by_block_hash_and_index(client, hash, index) + EthApiClient::::sign_typed_data(client, address, typed_data) + .await + .unwrap_err(); + EthApiClient::::transaction_by_hash(client, tx_hash) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_hash_and_index( + client, hash, index, + ) + .await + .unwrap(); + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), ) .await .unwrap(); - EthApiClient::::estimate_gas( + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), @@ -242,7 +266,7 @@ where ) .await .unwrap(); - EthApiClient::::call( + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -251,30 +275,38 @@ where ) .await .unwrap(); - EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction(client, transaction_request) + EthApiClient::::syncing(client).await.unwrap(); + EthApiClient::::send_transaction(client, transaction_request) + .await + .unwrap_err(); + EthApiClient::::hashrate(client).await.unwrap(); + EthApiClient::::submit_hashrate( + client, + U256::default(), + B256::default(), + ) + .await + .unwrap(); + EthApiClient::::gas_price(client).await.unwrap_err(); + EthApiClient::::max_priority_fee_per_gas(client) .await .unwrap_err(); - EthApiClient::::hashrate(client).await.unwrap(); - EthApiClient::::submit_hashrate(client, U256::default(), B256::default()) + EthApiClient::::get_proof(client, address, vec![], None) .await .unwrap(); - EthApiClient::::gas_price(client).await.unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client).await.unwrap_err(); - EthApiClient::::get_proof(client, address, vec![], None).await.unwrap(); // Unimplemented assert!(is_unimplemented( - EthApiClient::::author(client).await.err().unwrap() + EthApiClient::::author(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::is_mining(client).await.err().unwrap() + EthApiClient::::is_mining(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::get_work(client).await.err().unwrap() + EthApiClient::::get_work(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::submit_work( + EthApiClient::::submit_work( client, B64::default(), B256::default(), @@ -285,7 +317,7 @@ where .unwrap() )); assert!(is_unimplemented( - EthApiClient::::sign_transaction(client, call_request.clone()) + EthApiClient::::sign_transaction(client, call_request.clone()) .await .err() .unwrap() @@ -369,7 +401,7 @@ where OtterscanClient::get_header_by_number(client, block_number).await.unwrap(); OtterscanClient::has_code(client, address, None).await.unwrap(); - OtterscanClient::has_code(client, address, Some(block_number)).await.unwrap(); + OtterscanClient::has_code(client, address, Some(block_number.into())).await.unwrap(); OtterscanClient::get_api_level(client).await.unwrap(); diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 6ef6c7f677..e95ee07642 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ 
b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -8,7 +8,7 @@ use reth_rpc::EthApi; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; use reth_rpc_server_types::RpcModuleSelection; -use reth_rpc_types::{Block, Transaction}; +use reth_rpc_types::{Block, Receipt, Transaction}; use std::{ future::Future, pin::Pin, @@ -75,7 +75,7 @@ async fn test_rpc_middleware() { .unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::<Transaction, Block>::protocol_version(&client).await.unwrap(); + EthApiClient::<Transaction, Block, Receipt>::protocol_version(&client).await.unwrap(); let count = mylayer.count.load(Ordering::Relaxed); assert_eq!(count, 1); } diff --git a/crates/rpc/rpc-builder/tests/it/serde.rs b/crates/rpc/rpc-builder/tests/it/serde.rs index 5df41b2d19..e8d60f4c3b 100644 --- a/crates/rpc/rpc-builder/tests/it/serde.rs +++ b/crates/rpc/rpc-builder/tests/it/serde.rs @@ -1,11 +1,11 @@ //! various serde test use crate::utils::launch_http; +use alloy_primitives::U256; use jsonrpsee::{ core::{client::ClientT, traits::ToRpcParams}, types::Request, }; -use reth_primitives::U256; use reth_rpc_server_types::RethRpcModule; use serde_json::value::RawValue; diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 85d81ea39f..ee7a064abd 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -17,7 +17,10 @@ use reth_rpc_layer::JwtSecret; use reth_rpc_server_types::RpcModuleSelection; use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; -use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; +use reth_transaction_pool::{ noop::NoopTransactionPool, test_utils::{TestPool, TestPoolBuilder}, }; use tokio::sync::mpsc::unbounded_channel; /// Localhost with port 0 so a free port is used. @@ -43,6 +46,7 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { MAINNET.clone(), beacon_engine_handle, spawn_test_payload_service().into(), + NoopTransactionPool::default(), Box::<TokioTaskExecutor>::default(), client, EngineCapabilities::default(), @@ -126,5 +130,5 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< .with_network(NoopNetwork::default()) .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()) - .with_evm_config(EthEvmConfig::default()) + .with_evm_config(EthEvmConfig::new(MAINNET.clone())) } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index d067515f6c..57f0832b5f 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -25,6 +25,11 @@ reth-tasks.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true +reth-transaction-pool.workspace = true + +# ethereum +alloy-eips.workspace = true +alloy-primitives.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index eb796aebcd..de4d962315 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -19,6 +19,7 @@ pub const CAPABILITIES: &[&str] = &[ "engine_getPayloadBodiesByRangeV1", "engine_getPayloadBodiesByHashV2", "engine_getPayloadBodiesByRangeV2", + "engine_getBlobsV1", ]; // The list of all supported Engine capabilities available over the engine endpoint.
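// --- Illustrative sketch, not part of this diff: the request flow behind the
// `engine_getBlobsV1` capability registered above. The consensus layer submits
// up to 128 versioned hashes and receives one `Option<BlobAndProofV1>` per
// hash, answered purely from the transaction pool's blob cache (see the
// `get_blobs_v1` handler in the next file). The free function and the string
// error type here are hypothetical stand-ins for the real handler plumbing.
use alloy_eips::eip4844::BlobAndProofV1;
use alloy_primitives::B256;
use reth_transaction_pool::TransactionPool;

const MAX_BLOB_LIMIT: usize = 128;

fn get_blobs<Pool: TransactionPool>(
    pool: &Pool,
    versioned_hashes: Vec<B256>,
) -> Result<Vec<Option<BlobAndProofV1>>, String> {
    // Same guard as the handler: oversized requests are rejected up front,
    // before the pool is touched.
    if versioned_hashes.len() > MAX_BLOB_LIMIT {
        return Err(format!("requested blob count too large: {}", versioned_hashes.len()));
    }
    // Hashes the pool never saw, or has already evicted, come back as `None`.
    pool.get_blobs_for_versioned_hashes(&versioned_hashes).map_err(|err| err.to_string())
}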
diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index d22590a3aa..a09ef86619 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,6 +1,8 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use async_trait::async_trait; use jsonrpsee_core::RpcResult; use reth_beacon_consensus::BeaconConsensusEngineHandle; @@ -12,9 +14,7 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{ - Block, BlockHash, BlockHashOrNumber, BlockNumber, EthereumHardfork, B256, U64, -}; +use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, @@ -27,6 +27,7 @@ use reth_rpc_types_compat::engine::payload::{ }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; +use reth_transaction_pool::TransactionPool; use std::{sync::Arc, time::Instant}; use tokio::sync::oneshot; use tracing::{trace, warn}; @@ -37,13 +38,16 @@ pub type EngineApiSender = oneshot::Sender>; /// The upper limit for payload bodies request. const MAX_PAYLOAD_BODIES_LIMIT: u64 = 1024; +/// The upper limit blobs `eth_getBlobs`. +const MAX_BLOB_LIMIT: usize = 128; + /// The Engine API implementation that grants the Consensus layer access to data and /// functions in the Execution layer that are crucial for the consensus process. -pub struct EngineApi { - inner: Arc>, +pub struct EngineApi { + inner: Arc>, } -struct EngineApiInner { +struct EngineApiInner { /// The provider to interact with the chain. provider: Provider, /// Consensus configuration @@ -60,19 +64,24 @@ struct EngineApiInner { client: ClientVersionV1, /// The list of all supported Engine capabilities available over the engine endpoint. capabilities: EngineCapabilities, + /// Transaction pool. + tx_pool: Pool, } -impl EngineApi +impl EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, + Pool: TransactionPool + 'static, { /// Create new instance of [`EngineApi`]. 
+ #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, chain_spec: Arc, beacon_consensus: BeaconConsensusEngineHandle, payload_store: PayloadStore, + tx_pool: Pool, task_spawner: Box, client: ClientVersionV1, capabilities: EngineCapabilities, @@ -86,6 +95,7 @@ where metrics: EngineApiMetrics::default(), client, capabilities, + tx_pool, }); Self { inner } } @@ -609,10 +619,11 @@ where } #[async_trait] -impl EngineApiServer for EngineApi +impl EngineApiServer for EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, + Pool: TransactionPool + 'static, { /// Handler for `engine_newPayloadV1` /// See also @@ -904,9 +915,25 @@ where async fn exchange_capabilities(&self, _capabilities: Vec) -> RpcResult> { Ok(self.inner.capabilities.list()) } + + async fn get_blobs_v1( + &self, + versioned_hashes: Vec, + ) -> RpcResult>> { + trace!(target: "rpc::engine", "Serving engine_getBlobsV1"); + if versioned_hashes.len() > MAX_BLOB_LIMIT { + return Err(EngineApiError::BlobRequestTooLarge { len: versioned_hashes.len() }.into()) + } + + Ok(self + .inner + .tx_pool + .get_blobs_for_versioned_hashes(&versioned_hashes) + .map_err(|err| EngineApiError::Internal(Box::new(err)))?) + } } -impl std::fmt::Debug for EngineApi +impl std::fmt::Debug for EngineApi where EngineT: EngineTypes, { @@ -920,20 +947,21 @@ mod tests { use super::*; use assert_matches::assert_matches; use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; - use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_testing_utils::generators::random_block; - use reth_chainspec::MAINNET; + use reth_ethereum_engine_primitives::EthEngineTypes; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedBlock; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; + use reth_testing_utils::generators::random_block; use reth_tokio_util::EventSender; + use reth_transaction_pool::noop::NoopTransactionPool; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; - fn setup_engine_api() -> (EngineApiTestHandle, EngineApi, EthEngineTypes>) + fn setup_engine_api( + ) -> (EngineApiTestHandle, EngineApi, EthEngineTypes, NoopTransactionPool>) { let client = ClientVersionV1 { code: ClientCode::RH, @@ -953,6 +981,7 @@ mod tests { chain_spec.clone(), BeaconConsensusEngineHandle::new(to_engine, event_sender), payload_store.into(), + NoopTransactionPool::default(), task_executor, client, EngineCapabilities::default(), @@ -1115,7 +1144,7 @@ mod tests { // https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-3 mod exchange_transition_configuration { use super::*; - use reth_primitives::U256; + use alloy_primitives::U256; use reth_testing_utils::generators::{self, BlockParams}; #[tokio::test] diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index a1e4dc3289..7fd5a112a1 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -1,10 +1,10 @@ +use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; use reth_payload_builder::error::PayloadBuilderError; use 
reth_payload_primitives::EngineObjectValidationError; -use reth_primitives::{B256, U256}; use reth_rpc_types::ToRpcError; use thiserror::Error; @@ -42,6 +42,12 @@ pub enum EngineApiError { /// The length that was requested. len: u64, }, + /// Too many requested versioned hashes for blobs request + #[error("requested blob count too large: {len}")] + BlobRequestTooLarge { + /// The length that was requested. + len: usize, + }, /// Thrown if `engine_getPayloadBodiesByRangeV1` contains an invalid range #[error("invalid start ({start}) or count ({count})")] InvalidBodiesRange { @@ -145,7 +151,8 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { error.to_string(), None::<()>, ), - EngineApiError::PayloadRequestTooLarge { .. } => { + EngineApiError::PayloadRequestTooLarge { .. } | + EngineApiError::BlobRequestTooLarge { .. } => { jsonrpsee_types::error::ErrorObject::owned( REQUEST_TOO_LARGE_CODE, REQUEST_TOO_LARGE_MESSAGE, diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 30fe594eb1..6afa6ef974 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,8 +1,9 @@ //! Some payload tests +use alloy_primitives::{Bytes, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use assert_matches::assert_matches; -use reth_primitives::{proofs, Block, Bytes, SealedBlock, TransactionSigned, Withdrawals, U256}; +use reth_primitives::{proofs, Block, SealedBlock, TransactionSigned, Withdrawals}; use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index fb9055b86d..ff2abc2311 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -34,8 +34,9 @@ reth-trie.workspace = true # ethereum alloy-dyn-abi = { workspace = true, features = ["eip712"] } -alloy-network.workspace = true alloy-json-rpc.workspace = true +alloy-network.workspace = true +alloy-primitives.workspace = true # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } @@ -63,4 +64,4 @@ optimism = [ ] bsc = [ "reth-primitives/bsc", -] \ No newline at end of file +] diff --git a/crates/rpc/rpc-eth-api/src/bundle.rs b/crates/rpc/rpc-eth-api/src/bundle.rs index bf3a623df2..563e6ff754 100644 --- a/crates/rpc/rpc-eth-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -2,8 +2,8 @@ //! //! See also +use alloy_primitives::{Bytes, B256}; use jsonrpsee::proc_macros::rpc; -use reth_primitives::{Bytes, B256}; use reth_rpc_types::mev::{ CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 59c907cc8e..5e6efdc4d7 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,42 +1,44 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for //! the `eth_` namespace. 
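// --- Illustrative sketch, not part of this diff: the core.rs hunks below add a
// third type parameter to `EthApiServer` for the receipt response, so
// `eth_getTransactionReceipt` returns `RpcReceipt<T::NetworkTypes>` instead of a
// hard-coded `AnyTransactionReceipt`. A compile-time bound check under that
// assumption; the helper name is hypothetical, while the bound itself mirrors
// the one the rpc-builder now places on `FullEthApiServer` implementations.
use alloy_network::Network;
use reth_rpc_eth_api::helpers::FullEthApi;
use reth_rpc_types::AnyTransactionReceipt;

// Compiles only for eth APIs whose network types serialize receipts as
// `AnyTransactionReceipt`, i.e. exactly the APIs the builder can still register.
fn assert_receipt_compatible<T>(_api: &T)
where
    T: FullEthApi<NetworkTypes: Network<ReceiptResponse = AnyTransactionReceipt>>,
{
}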
- use alloy_dyn_abi::TypedData; use alloy_json_rpc::RpcObject; -use jsonrpsee::{core::RpcResult, proc_macros::rpc, types::ErrorObjectOwned}; -use reth_primitives::{ - transaction::AccessListResult, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; -use reth_rpc_eth_types::{utils::binary_search, EthApiError}; +use alloy_network::Network; +use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{transaction::AccessListResult, BlockId, BlockNumberOrTag}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ serde_helpers::JsonStorageKey, - simulate::{SimBlock, SimulatedBlock}, + simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, - AnyTransactionReceipt, BlockOverrides, BlockSidecar, BlockTransactions, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, StateContext, - SyncStatus, TransactionRequest, Work, + AnyTransactionReceipt, BlockOverrides, BlockSidecar, Bundle, EIP1186AccountProofResponse, + EthCallResponse, FeeHistory, Header, Index, StateContext, SyncStatus, TransactionRequest, Work, }; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; use tracing::trace; use crate::{ - helpers::{ - EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi, LoadState, - }, - RpcBlock, RpcTransaction, + helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi}, + RpcBlock, RpcReceipt, RpcTransaction, }; /// Helper trait, unifies functionality that must be supported to implement all RPC methods for /// server. pub trait FullEthApiServer: - EthApiServer, RpcBlock> + FullEthApi + Clone + EthApiServer< + RpcTransaction, + RpcBlock, + RpcReceipt, + > + FullEthApi + + Clone { } impl FullEthApiServer for T where - T: EthApiServer, RpcBlock> - + FullEthApi + T: EthApiServer< + RpcTransaction, + RpcBlock, + RpcReceipt, + > + FullEthApi + Clone { } @@ -44,7 +46,7 @@ impl FullEthApiServer for T where /// Eth rpc interface: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { +pub trait EthApi { /// Returns the protocol version encoded as a string. #[method(name = "protocolVersion")] async fn protocol_version(&self) -> RpcResult; @@ -101,10 +103,7 @@ pub trait EthApi { /// Returns all transaction receipts for a given block. #[method(name = "getBlockReceipts")] - async fn block_receipts( - &self, - block_id: BlockId, - ) -> RpcResult>>; + async fn block_receipts(&self, block_id: BlockId) -> RpcResult>>; /// Returns an uncle block of the given block and index. #[method(name = "getUncleByBlockHashAndIndex")] @@ -172,7 +171,7 @@ pub trait EthApi { /// Returns the receipt of a transaction by transaction hash. #[method(name = "getTransactionReceipt")] - async fn transaction_receipt(&self, hash: B256) -> RpcResult>; + async fn transaction_receipt(&self, hash: B256) -> RpcResult>; /// Returns the balance of the account of given address. #[method(name = "getBalance")] @@ -212,9 +211,9 @@ pub trait EthApi { #[method(name = "simulateV1")] async fn simulate_v1( &self, - opts: SimBlock, + opts: SimulatePayload, block_number: Option, - ) -> RpcResult>; + ) -> RpcResult>>; /// Executes a new message call immediately without creating a transaction on the block chain. 
#[method(name = "call")] @@ -370,9 +369,14 @@ pub trait EthApi { } #[async_trait::async_trait] -impl EthApiServer, RpcBlock> for T +impl + EthApiServer< + RpcTransaction, + RpcBlock, + RpcReceipt, + > for T where - T: FullEthApi, + T: FullEthApi>, jsonrpsee_types::error::ErrorObject<'static>: From, { /// Handler for: `eth_protocolVersion` @@ -466,7 +470,7 @@ where async fn block_receipts( &self, block_id: BlockId, - ) -> RpcResult>> { + ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); Ok(EthBlocks::block_receipts(self, block_id).await?) } @@ -561,59 +565,15 @@ where nonce: U64, ) -> RpcResult>> { trace!(target: "rpc::eth", ?sender, ?nonce, "Serving eth_getTransactionBySenderAndNonce"); - let nonce = nonce.to::(); - - // Check the pool first - if let Some(tx) = LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { - let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(reth_rpc_types_compat::transaction::from_recovered(transaction))) - } - - // Check if the sender is a contract - if self.get_code(sender, None).await?.len() > 0 { - return Ok(None) - } - - let highest = EthState::transaction_count(self, sender, None).await?.saturating_to::(); - - // If the nonce is higher or equal to the highest nonce, the transaction is pending or not - // exists. - if nonce >= highest { - return Ok(None) - } - - // perform a binary search over the block range to find the block in which the sender's - // nonce reached the requested nonce. - let num = binary_search::<_, _, ErrorObjectOwned>( - 1, - self.block_number()?.saturating_to(), - |mid| { - async move { - let mid_nonce = EthState::transaction_count(self, sender, Some(mid.into())) - .await? - .saturating_to::(); - - // The `transaction_count` returns the `nonce` after the transaction was - // executed, which is the state of the account after the block, and we need to - // find the transaction whose nonce is the pre-state, so - // need to compare with `nonce`(no equal). - Ok(mid_nonce > nonce) - } - }, - ) - .await?; - - let Some(BlockTransactions::Full(transactions)) = - self.block_by_number(num.into(), true).await?.map(|block| block.transactions) - else { - return Err(EthApiError::UnknownBlockNumber.into()); - }; - - Ok(transactions.into_iter().find(|tx| *tx.from == *sender && tx.nonce == nonce)) + Ok(EthTransactions::get_transaction_by_sender_and_nonce(self, sender, nonce.to(), true) + .await?) } /// Handler for: `eth_getTransactionReceipt` - async fn transaction_receipt(&self, hash: B256) -> RpcResult> { + async fn transaction_receipt( + &self, + hash: B256, + ) -> RpcResult>> { trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); Ok(EthTransactions::transaction_receipt(self, hash).await?) } @@ -666,11 +626,11 @@ where /// Handler for: `eth_simulateV1` async fn simulate_v1( &self, - opts: SimBlock, + payload: SimulatePayload, block_number: Option, - ) -> RpcResult> { + ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); - Ok(EthCall::simulate_v1(self, opts, block_number).await?) + Ok(EthCall::simulate_v1(self, payload, block_number).await?) 
} /// Handler for: `eth_call` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index b7d64d4113..40d4d2b8fe 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use futures::Future; -use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta}; +use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthStateCache}; use reth_rpc_types::{AnyTransactionReceipt, BlockSidecar, Header, Index}; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; @@ -40,15 +40,12 @@ pub trait EthBlocks: LoadBlock { ) -> impl Future>, Self::Error>> + Send { async move { - let block = match self.block_with_senders(block_id).await? { - Some(block) => block, - None => return Ok(None), - }; + let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; let block_hash = block.hash(); let total_difficulty = EthBlocks::provider(self) .header_td_by_number(block.number) .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(block_id))?; let block = from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash)) .map_err(Self::Error::from_eth_err)?; Ok(Some(block)) @@ -96,44 +93,7 @@ pub trait EthBlocks: LoadBlock { block_id: BlockId, ) -> impl Future>, Self::Error>> + Send where - Self: LoadReceipt, - { - async move { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number; - let base_fee = block.base_fee_per_gas; - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; - let block = block.unseal(); - - let receipts = block - .body - .into_iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: tx.hash, - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - - ReceiptBuilder::new(&tx, meta, receipt, &receipts) - .map(|builder| builder.build()) - .map_err(Self::Error::from_eth_err) - }) - .collect::, Self::Error>>(); - return receipts.map(Some) - } - - Ok(None) - } - } + Self: LoadReceipt; /// Helper method that loads a bock and all its receipts. fn load_block_and_receipts( diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index e58bfb3634..61fde1f95d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -2,10 +2,13 @@ //! methods. 
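// --- Illustrative sketch, not part of this diff: a minimal request payload for
// the `eth_simulateV1` implementation added in this file. The field names are
// the ones destructured from `SimulatePayload` and `SimBlock` in the hunks
// below; any construction details beyond those fields are assumptions.
use reth_rpc_types::{
    simulate::{SimBlock, SimulatePayload},
    TransactionRequest,
};

fn example_payload(calls: Vec<TransactionRequest>) -> SimulatePayload {
    SimulatePayload {
        // One simulated block; each entry bumps block number and timestamp by one.
        block_state_calls: vec![SimBlock {
            block_overrides: None, // optional per-block env overrides
            state_overrides: None, // optional pre-state overrides
            calls,                 // executed in order, state carried across calls
        }],
        trace_transfers: false,          // true wires in `TransferInspector` for ETH-transfer logs
        validation: true,                // enforce base-fee rules instead of zeroing the basefee
        return_full_transactions: false, // return only tx hashes in the simulated blocks
    }
}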
use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +use alloy_primitives::{Bytes, TxKind, B256, U256}; use cfg_if::cfg_if; use futures::Future; #[cfg(feature = "bsc")] use reth_chainspec::BscHardforks; +use reth_chainspec::MIN_TRANSACTION_GAS; #[cfg(feature = "bsc")] use reth_errors::RethError; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; @@ -14,14 +17,15 @@ use reth_primitives::system_contracts::get_upgrade_system_contracts; #[cfg(feature = "bsc")] use reth_primitives::system_contracts::is_system_transaction; use reth_primitives::{ + basefee::calc_next_block_base_fee, revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, ResultAndState, TransactTo, TxEnv, }, transaction::AccessListResult, - Bytes, TransactionSignedEcRecovered, TxKind, B256, U256, + Header, TransactionSignedEcRecovered, }; -use reth_provider::{ChainSpecProvider, StateProvider}; +use reth_provider::{ChainSpecProvider, HeaderProvider, StateProvider}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -30,26 +34,24 @@ use reth_rpc_eth_types::{ apply_block_overrides, apply_state_overrides, caller_gas_allowance, cap_tx_gas_limit_with_caller_allowance, get_precompiles, CallFees, }, + simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; -use reth_rpc_server_types::constants::gas_oracle::{ - CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS, -}; +use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use reth_rpc_types::{ - simulate::{SimBlock, SimulatedBlock}, + simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, - BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, TransactionRequest, + Block, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, TransactionRequest, + WithOtherFields, }; #[cfg(feature = "bsc")] use revm::bsc::SYSTEM_ADDRESS; #[cfg(feature = "bsc")] use revm::db::AccountState::{NotExisting, Touched}; -use revm::{Database, DatabaseCommit}; -use revm_inspectors::access_list::AccessListInspector; +use revm::{Database, DatabaseCommit, GetInspector}; +use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; use tracing::trace; -use crate::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; - /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. 
pub trait EthCall: Call + LoadPendingBlock { @@ -69,10 +71,158 @@ pub trait EthCall: Call + LoadPendingBlock { /// See also: fn simulate_v1( &self, - _opts: SimBlock, - _block_number: Option, - ) -> impl Future, Self::Error>> + Send { - async move { Err(EthApiError::Unsupported("eth_simulateV1 is not supported.").into()) } + payload: SimulatePayload, + block: Option, + ) -> impl Future< + Output = Result< + Vec>>>, + Self::Error, + >, + > + Send + where + Self: LoadBlock, + { + async move { + if payload.block_state_calls.len() > self.max_simulate_blocks() as usize { + return Err(EthApiError::InvalidParams("too many blocks.".to_string()).into()) + } + + let SimulatePayload { + block_state_calls, + trace_transfers, + validation, + return_full_transactions, + } = payload; + + if block_state_calls.is_empty() { + return Err(EthApiError::InvalidParams(String::from("calls are empty.")).into()) + } + + // Build cfg and block env, we'll reuse those. + let (mut cfg, mut block_env, block) = + self.evm_env_at(block.unwrap_or_default()).await?; + + // Gas cap for entire operation + let total_gas_limit = self.call_gas_limit() as u128; + + let base_block = self.block(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; + let mut parent_hash = base_block.header.hash(); + let total_difficulty = LoadPendingBlock::provider(self) + .header_td_by_number(block_env.number.to()) + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(block))?; + + // Only enforce base fee if validation is enabled + cfg.disable_base_fee = !validation; + // Always disable EIP-3607 + cfg.disable_eip3607 = true; + + let this = self.clone(); + self.spawn_with_state_at_block(block, move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut blocks: Vec< + SimulatedBlock>>, + > = Vec::with_capacity(block_state_calls.len()); + let mut gas_used = 0; + for block in block_state_calls { + // Increase number and timestamp for every new block + block_env.number += U256::from(1); + block_env.timestamp += U256::from(1); + + if validation { + let chain_spec = LoadPendingBlock::provider(&this).chain_spec(); + let base_fee_params = + chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to()); + let base_fee = if let Some(latest) = blocks.last() { + let header = &latest.inner.header; + calc_next_block_base_fee( + header.gas_used, + header.gas_limit, + header.base_fee_per_gas.unwrap_or_default(), + base_fee_params, + ) + } else { + base_block + .header + .next_block_base_fee(base_fee_params) + .unwrap_or_default() as u128 + }; + block_env.basefee = U256::from(base_fee); + } else { + block_env.basefee = U256::ZERO; + } + + let SimBlock { block_overrides, state_overrides, mut calls } = block; + + if let Some(block_overrides) = block_overrides { + apply_block_overrides(block_overrides, &mut db, &mut block_env); + } + if let Some(state_overrides) = state_overrides { + apply_state_overrides(state_overrides, &mut db)?; + } + + if (total_gas_limit - gas_used) < block_env.gas_limit.to() { + return Err( + EthApiError::Other(Box::new(EthSimulateError::GasLimitReached)).into() + ) + } + + // Resolve transactions, populate missing fields and enforce calls correctness. 
+                    let transactions = simulate::resolve_transactions(
+                        &mut calls,
+                        validation,
+                        block_env.gas_limit.to(),
+                        cfg.chain_id,
+                        &mut db,
+                    )?;
+
+                    let mut calls = calls.into_iter().peekable();
+                    let mut results = Vec::with_capacity(calls.len());
+
+                    while let Some(tx) = calls.next() {
+                        let env = this.build_call_evm_env(cfg.clone(), block_env.clone(), tx)?;
+
+                        let (res, env) = {
+                            if trace_transfers {
+                                this.transact_with_inspector(
+                                    &mut db,
+                                    env,
+                                    TransferInspector::new(false).with_logs(true),
+                                )?
+                            } else {
+                                this.transact(&mut db, env)?
+                            }
+                        };
+
+                        if calls.peek().is_some() {
+                            // need to apply the state changes of this call before executing the
+                            // next call
+                            db.commit(res.state);
+                        }
+
+                        results.push((env.tx.caller, res.result));
+                    }
+
+                    let block = simulate::build_block(
+                        results,
+                        transactions,
+                        &block_env,
+                        parent_hash,
+                        total_difficulty,
+                        return_full_transactions,
+                        &db,
+                    )?;
+
+                    parent_hash = block.inner.header.hash;
+                    gas_used += block.inner.header.gas_used;
+
+                    blocks.push(block);
+                }
+
+                Ok(blocks)
+            })
+            .await
+        }
     }

     /// Executes the call request (`eth_call`) and returns the output
@@ -121,7 +271,7 @@
             self.block_with_senders(target_block)
         )?;

-        let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber.into()) };
+        let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?;
         let gas_limit = self.call_gas_limit();

         // we're essentially replaying the transactions in the block here, hence we need the
@@ -316,10 +466,13 @@ pub trait Call: LoadState + SpawnBlocking {
     /// Data access in default trait method implementations.
     fn call_gas_limit(&self) -> u64;

+    /// Returns the maximum number of blocks accepted for `eth_simulateV1`.
+    fn max_simulate_blocks(&self) -> u64;
+
     /// Returns a handle for reading evm config.
     ///
     /// Data access in default (L1) trait method implementations.
-    fn evm_config(&self) -> &impl ConfigureEvm;
+    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>;
     /// Executes the closure with the state that corresponds to the given [`BlockId`].
     fn with_state_at_block<F, T>(&self, at: BlockId, f: F) -> Result<T, Self::Error>
@@ -347,6 +500,24 @@ pub trait Call: LoadState + SpawnBlocking {
         Ok((res, env))
     }

+    /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state
+    /// changes.
+    fn transact_with_inspector<DB>(
+        &self,
+        db: DB,
+        env: EnvWithHandlerCfg,
+        inspector: impl GetInspector<DB>,
+    ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error>
+    where
+        DB: Database,
+        EthApiError: From<DB::Error>,
+    {
+        let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector);
+        let res = evm.transact().map_err(Self::Error::from_evm_err)?;
+        let (_, env) = evm.into_db_and_env_with_handler_cfg();
+        Ok((res, env))
+    }
+
     /// Executes the call request at the given [`BlockId`].
     fn transact_call_at(
         &self,
@@ -971,7 +1142,8 @@ pub trait Call: LoadState + SpawnBlocking {
             blob_versioned_hashes,
             max_fee_per_blob_gas,
             authorization_list,
-            ..
+            transaction_type: _,
+            sidecar: _,
         } = request;

         let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } =
@@ -1073,25 +1245,16 @@ pub trait Call: LoadState + SpawnBlocking {
         // set nonce to None so that the correct nonce is chosen by the EVM
         request.nonce = None;

-        // apply block overrides, we need to apply them first so that they take effect when we
-        // create the evm env via `build_call_evm_env`, e.g. basefee
-        if let Some(mut block_overrides) = overrides.block {
-            if let Some(block_hashes) = block_overrides.block_hash.take() {
-                // override block hashes
-                db.block_hashes
-                    .extend(block_hashes.into_iter().map(|(num, hash)| (U256::from(num), hash)))
-            }
-            apply_block_overrides(*block_overrides, &mut block);
+        if let Some(block_overrides) = overrides.block {
+            apply_block_overrides(*block_overrides, db, &mut block);
         }
-
-        let request_gas = request.gas;
-        let mut env = self.build_call_evm_env(cfg, block, request)?;
-
-        // apply state overrides
         if let Some(state_overrides) = overrides.state {
             apply_state_overrides(state_overrides, db)?;
         }
+
+        let request_gas = request.gas;
+        let mut env = self.build_call_evm_env(cfg, block, request)?;
+
         if request_gas.is_none() {
             // No gas limit was provided in the request, so we need to cap the transaction gas limit
             if env.tx.gas_price > U256::ZERO {
diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs
index a8fbc7e7fa..c05b9881f9 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs
@@ -1,8 +1,8 @@
 //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods.

+use alloy_primitives::U256;
 use futures::Future;
 use reth_chainspec::ChainSpec;
-use reth_primitives::U256;
 use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider};
 use reth_rpc_eth_types::{
     fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache,
@@ -82,12 +82,10 @@ pub trait EthFees: LoadFee {
             block_count = block_count.saturating_sub(1);
         }

-        let Some(end_block) = LoadFee::provider(self)
+        let end_block = LoadFee::provider(self)
             .block_number_for_id(newest_block.into())
             .map_err(Self::Error::from_eth_err)?
- else { - return Err(EthApiError::UnknownBlockNumber.into()) - }; + .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; // need to add 1 to the end block to get the correct (inclusive) range let end_block_plus = end_block + 1; @@ -293,13 +291,11 @@ pub trait LoadFee: LoadBlock { let base_fee = self .block(BlockNumberOrTag::Pending.into()) .await? - .ok_or(EthApiError::UnknownBlockNumber)? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? .base_fee_per_gas - .ok_or_else(|| { - EthApiError::InvalidTransaction( - RpcInvalidTransactionError::TxTypeNotSupported, - ) - })?; + .ok_or(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::TxTypeNotSupported, + ))?; U256::from(base_fee) } }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 68c311d5ed..5b5aae2e1c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -4,6 +4,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; +use alloy_primitives::{BlockNumber, B256, U256}; use futures::Future; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_evm::{ @@ -18,9 +19,8 @@ use reth_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockNumber, Header, IntoRecoveredTransaction, Receipt, Requests, - SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, B256, - EMPTY_OMMER_ROOT_HASH, U256, + Block, Header, IntoRecoveredTransaction, Receipt, Requests, SealedBlockWithSenders, + SealedHeader, TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -30,6 +30,7 @@ use reth_revm::{ database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; +use reth_rpc_types::BlockNumberOrTag; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; @@ -65,7 +66,7 @@ pub trait LoadPendingBlock: EthApiTypes { /// Returns a handle for reading evm config. /// /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm; + fn evm_config(&self) -> &impl ConfigureEvm
; /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// @@ -82,7 +83,7 @@ pub trait LoadPendingBlock: EthApiTypes { .provider() .latest_header() .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; let (mut latest_header, block_hash) = latest.split(); // child block diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 39da504750..fbd81e6fd7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,10 +3,10 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::EthStateCache; use reth_rpc_types::AnyTransactionReceipt; -use crate::{EthApiTypes, FromEthApiError}; +use crate::EthApiTypes; /// Assembles transaction receipt data w.r.t to network. /// @@ -23,17 +23,5 @@ pub trait LoadReceipt: EthApiTypes + Send + Sync { tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, - ) -> impl Future> + Send { - async move { - // get all receipts for the block - let all_receipts = self - .cache() - .get_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; - - Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) - } - } + ) -> impl Future> + Send; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 5b71761431..3580410b6d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -3,8 +3,9 @@ use std::result; use alloy_dyn_abi::TypedData; +use alloy_primitives::Address; use dyn_clone::DynClone; -use reth_primitives::{Address, Signature, TransactionSigned}; +use reth_primitives::{Signature, TransactionSigned}; use reth_rpc_eth_types::SignError; use reth_rpc_types::TypedTransactionRequest; diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index f8f257fdbb..f404ec7db1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -2,11 +2,11 @@ use std::sync::Arc; +use alloy_primitives::{Address, U256, U64}; use futures::Future; use reth_chainspec::{ChainInfo, ChainSpec}; use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; -use reth_primitives::{Address, U256, U64}; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; use reth_rpc_types::{Stage, SyncInfo, SyncStatus}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 5145bbade6..86c6895b69 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,11 +1,12 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
+use alloy_primitives::{Address, Bytes, B256, U256}; use futures::Future; use reth_chainspec::ChainSpec; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{Address, BlockId, Bytes, Header, B256, KECCAK_EMPTY, U256}; +use reth_primitives::{BlockId, Header, KECCAK_EMPTY}; use reth_provider::{ BlockIdReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; @@ -42,14 +43,7 @@ pub trait EthState: LoadState + SpawnBlocking { address: Address, block_id: Option, ) -> impl Future> + Send { - self.spawn_blocking_io(move |this| { - Ok(this - .state_at_block_id_or_latest(block_id)? - .account_code(address) - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default() - .original_bytes()) - }) + LoadState::get_code(self, address, block_id) } /// Returns balance of given account, at given blocknumber. @@ -105,7 +99,7 @@ pub trait EthState: LoadState + SpawnBlocking { let block_number = LoadState::provider(self) .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(block_id))?; let max_window = self.max_proof_window(); if chain_info.best_number.saturating_sub(block_number) > max_window { return Err(EthApiError::ExceedsMaxProofWindow.into()) @@ -115,7 +109,8 @@ pub trait EthState: LoadState + SpawnBlocking { let _permit = self .acquire_owned() .await - .map_err(|err| EthApiError::Internal(RethError::other(err)))?; + .map_err(RethError::other) + .map_err(EthApiError::Internal)?; self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; let storage_keys = keys.iter().map(|key| key.0).collect::>(); @@ -229,7 +224,7 @@ pub trait LoadState: EthApiTypes { let block_hash = LoadPendingBlock::provider(self) .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(at))?; let (cfg, env) = self .cache() .get_evm_env(block_hash) @@ -274,25 +269,52 @@ pub trait LoadState: EthApiTypes { Self: SpawnBlocking, { self.spawn_blocking_io(move |this| { + // first fetch the on chain nonce + let nonce = this + .state_at_block_id_or_latest(block_id)? + .account_nonce(address) + .map_err(Self::Error::from_eth_err)? + .unwrap_or_default(); + if block_id == Some(BlockId::pending()) { + // for pending tag we need to find the highest nonce in the pool let address_txs = this.pool().get_transactions_by_sender(address); - if let Some(highest_nonce) = + if let Some(highest_pool_nonce) = address_txs.iter().map(|item| item.transaction.nonce()).max() { - let tx_count = highest_nonce.checked_add(1).ok_or(Self::Error::from( - EthApiError::InvalidTransaction(RpcInvalidTransactionError::NonceMaxValue), - ))?; + // and the corresponding txcount is nonce + 1 + let next_nonce = + nonce.max(highest_pool_nonce).checked_add(1).ok_or_else(|| { + Self::Error::from(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::NonceMaxValue, + )) + })?; + + let tx_count = nonce.max(next_nonce); return Ok(U256::from(tx_count)) } } - let state = this.state_at_block_id_or_latest(block_id)?; - Ok(U256::from( - state - .account_nonce(address) - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default(), - )) + Ok(U256::from(nonce)) + }) + } + + /// Returns code of given account, at the given identifier. 
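The `transaction_count` change above merges the on-chain nonce with the pool's view when the `pending` tag is requested. A reduced sketch of that merge, with plain integers standing in for the provider and pool lookups (the helper below is hypothetical, not part of reth):

```rust
/// Pending transaction count: the on-chain nonce, bumped past any
/// transactions the sender already has queued in the pool.
fn pending_tx_count(state_nonce: u64, highest_pool_nonce: Option<u64>) -> Option<u64> {
    match highest_pool_nonce {
        // nonce + 1 of the highest known transaction, guarding against overflow
        Some(pool_nonce) => state_nonce.max(pool_nonce).checked_add(1),
        // no pool entries: the on-chain nonce is already the count
        None => Some(state_nonce),
    }
}

fn main() {
    assert_eq!(pending_tx_count(5, None), Some(5));
    assert_eq!(pending_tx_count(5, Some(7)), Some(8)); // pool is ahead of state
    assert_eq!(pending_tx_count(5, Some(3)), Some(6)); // stale pool entry, state wins
    assert_eq!(pending_tx_count(0, Some(u64::MAX)), None); // nonce overflow
}
```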
+    fn get_code(
+        &self,
+        address: Address,
+        block_id: Option<BlockId>,
+    ) -> impl Future<Output = Result<Bytes, Self::Error>> + Send
+    where
+        Self: SpawnBlocking,
+    {
+        self.spawn_blocking_io(move |this| {
+            Ok(this
+                .state_at_block_id_or_latest(block_id)?
+                .account_code(address)
+                .map_err(Self::Error::from_eth_err)?
+                .unwrap_or_default()
+                .original_bytes())
+        })
+    }
 }
diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
index d8932dead3..ef80617680 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
@@ -1,12 +1,13 @@
 //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods.

 use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction};
+use alloy_primitives::B256;
 use cfg_if::cfg_if;
 use futures::Future;
 use reth_evm::{ConfigureEvm, ConfigureEvmEnv};
 #[cfg(feature = "bsc")]
 use reth_primitives::system_contracts::is_system_transaction;
-use reth_primitives::B256;
+use reth_primitives::Header;
 use reth_revm::database::StateProviderDatabase;
 use reth_rpc_eth_types::{
     cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
@@ -26,7 +27,7 @@ pub trait Trace: LoadState {
     /// Returns a handle for reading evm config.
     ///
     /// Data access in default (L1) trait method implementations.
-    fn evm_config(&self) -> &impl ConfigureEvm;
+    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>;

     /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state
     /// changes.
diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
index d97a49f2d2..d1fb13fdd6 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
@@ -2,31 +2,32 @@
 //! network.

 use alloy_dyn_abi::TypedData;
+use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256};
 use futures::Future;
 use reth_primitives::{
-    Address, BlockId, Bytes, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned,
-    TxHash, TxKind, B256, U256,
+    BlockId, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned,
 };
-use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider};
+use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider};
 use reth_rpc_eth_types::{
-    utils::recover_raw_transaction, EthApiError, EthResult, EthStateCache, SignError,
-    TransactionSource,
+    utils::{binary_search, recover_raw_transaction},
+    EthApiError, EthStateCache, SignError, TransactionSource,
 };
 use reth_rpc_types::{
     transaction::{
         EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest,
         LegacyTransactionRequest,
     },
-    AnyTransactionReceipt, BlockSidecar, TransactionInfo, TransactionRequest,
+    AnyTransactionReceipt, BlockNumberOrTag, BlockSidecar, TransactionInfo, TransactionRequest,
     TypedTransactionRequest,
 };
-use reth_rpc_types_compat::transaction::from_recovered_with_block_context;
+use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_block_context};
 use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool};

 use crate::{FromEthApiError, IntoEthApiError, RpcTransaction};

 use super::{
-    Call, EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking,
+    Call, EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, LoadState,
+    SpawnBlocking,
 };

 /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in
@@ -214,6 +215,82 @@ pub trait EthTransactions: LoadTransaction {
         }
     }

+    /// Find a transaction by sender's address and nonce.
+    fn get_transaction_by_sender_and_nonce(
+        &self,
+        sender: Address,
+        nonce: u64,
+        include_pending: bool,
+    ) -> impl Future<Output = Result<Option<RpcTransaction<Self::NetworkTypes>>, Self::Error>> + Send
+    where
+        Self: LoadBlock + LoadState,
+    {
+        async move {
+            // Check the pool first
+            if include_pending {
+                if let Some(tx) =
+                    LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
+                {
+                    let transaction = tx.transaction.clone().into_consensus();
+                    return Ok(Some(from_recovered(transaction)));
+                }
+            }
+
+            // Check if the sender is a contract
+            if self.get_code(sender, None).await?.len() > 0 {
+                return Ok(None);
+            }
+
+            let highest = self.transaction_count(sender, None).await?.saturating_to::<u64>();
+
+            // If the nonce is higher than or equal to the highest nonce, the transaction is
+            // pending or does not exist.
+            if nonce >= highest {
+                return Ok(None);
+            }
+
+            let Ok(high) = LoadBlock::provider(self).best_block_number() else {
+                return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into());
+            };
+
+            // Perform a binary search over the block range to find the block in which the sender's
+            // nonce reached the requested nonce.
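The search described in the comment above is performed by the `binary_search` helper added to `reth_rpc_eth_types::utils` (see the call just below). A synchronous sketch of the same monotone-predicate search, against a toy per-block nonce table:

```rust
/// Smallest value in [low, high] for which `pred` returns true, assuming
/// `pred` is monotone (false..false true..true); None if it never does.
fn binary_search<F>(mut low: u64, mut high: u64, pred: F) -> Option<u64>
where
    F: Fn(u64) -> bool,
{
    let mut found = None;
    while low <= high {
        let mid = low + (high - low) / 2;
        if pred(mid) {
            found = Some(mid);
            // keep looking for an earlier block that already satisfies it
            if mid == 0 { break; }
            high = mid - 1;
        } else {
            low = mid + 1;
        }
    }
    found
}

fn main() {
    // Sender's nonce as of each block height (monotone non-decreasing).
    let nonce_at_block = [0u64, 0, 1, 1, 3, 4, 4];
    let target_nonce = 1;
    // First block whose account nonce exceeds the target is the block
    // in which the transaction with that nonce landed.
    let block = binary_search(1, 6, |b| nonce_at_block[b as usize] > target_nonce);
    assert_eq!(block, Some(4));
}
```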
+ let num = binary_search::<_, _, Self::Error>(1, high, |mid| async move { + let mid_nonce = + self.transaction_count(sender, Some(mid.into())).await?.saturating_to::(); + + Ok(mid_nonce > nonce) + }) + .await?; + + let block_id = num.into(); + self.block_with_senders(block_id) + .await? + .and_then(|block| { + let block_hash = block.hash(); + let block_number = block.number; + let base_fee_per_gas = block.base_fee_per_gas; + + block + .into_transactions_ecrecovered() + .enumerate() + .find(|(_, tx)| tx.signer() == sender && tx.nonce() == nonce) + .map(|(index, tx)| { + let tx_info = TransactionInfo { + hash: Some(tx.hash()), + block_hash: Some(block_hash), + block_number: Some(block_number), + base_fee: base_fee_per_gas.map(u128::from), + index: Some(index as u64), + }; + from_recovered_with_block_context(tx, tx_info) + }) + }) + .ok_or(EthApiError::HeaderNotFound(block_id).into()) + .map(Some) + } + } + /// Get transaction, as raw bytes, by [`BlockId`] and index of transaction within that block. /// /// Returns `Ok(None)` if the block does not exist, or index is out of range. @@ -527,21 +604,25 @@ pub trait EthTransactions: LoadTransaction { fn rpc_transaction_sidecar( &self, hash: B256, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadReceipt + 'static, { async move { - let meta = match LoadTransaction::provider(self).transaction_by_hash_with_meta(hash)? { + let meta = match LoadTransaction::provider(self) + .transaction_by_hash_with_meta(hash) + .map_err(Self::Error::from_eth_err)? + { Some((_, meta)) => meta, None => return Ok(None), }; // If no block sidecars found, return None - let Some(sidecars) = LoadTransaction::cache(self).get_sidecars(meta.block_hash).await? - else { - return Ok(None); - }; + let sidecars = + match LoadTransaction::cache(self).get_sidecars(meta.block_hash).await.unwrap() { + Some(sidecars) => sidecars, + None => return Ok(None), + }; Ok(sidecars.iter().find(|item| item.tx_hash == hash).map(|sidecar| BlockSidecar { blob_sidecar: sidecar.blob_transaction_sidecar.clone(), diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 30623d6b8e..ec6490917c 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -24,7 +24,7 @@ pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; pub use pubsub::EthPubSubApiServer; -pub use types::{EthApiTypes, RpcBlock, RpcTransaction}; +pub use types::{EthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 3f0d526b01..91a9ffdc69 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -19,8 +19,11 @@ pub trait EthApiTypes: Send + Sync + Clone { + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. - // todo: remove restriction `reth_rpc_types::Transaction` - type NetworkTypes: Network>; + // todo: remove restriction [`reth_rpc_types::Transaction`] + type NetworkTypes: Network< + TransactionResponse = WithOtherFields, + HeaderResponse = reth_rpc_types::Header, + >; } impl EthApiTypes for () { @@ -32,4 +35,7 @@ impl EthApiTypes for () { pub type RpcTransaction = ::TransactionResponse; /// Adapter for network specific block type. 
-pub type RpcBlock<T> = Block<RpcTransaction<T>>;
+pub type RpcBlock<T> = Block<RpcTransaction<T>, <T as Network>::HeaderResponse>;
+
+/// Adapter for network specific receipt type.
+pub type RpcReceipt<T> = <T as Network>::ReceiptResponse;
diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml
index dfb266e2ba..d3935bc6a0 100644
--- a/crates/rpc/rpc-eth-types/Cargo.toml
+++ b/crates/rpc/rpc-eth-types/Cargo.toml
@@ -29,6 +29,8 @@ reth-transaction-pool.workspace = true
 reth-trie.workspace = true

 # ethereum
+alloy-primitives.workspace = true
+alloy-consensus.workspace = true
 alloy-sol-types.workspace = true
 revm.workspace = true
 revm-inspectors.workspace = true
diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs
index 2edc81e8d7..a016d02158 100644
--- a/crates/rpc/rpc-eth-types/src/builder/config.rs
+++ b/crates/rpc/rpc-eth-types/src/builder/config.rs
@@ -7,7 +7,7 @@ use crate::{
 };
 use reth_rpc_server_types::constants::{
     default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER,
-    DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_PROOF_PERMITS,
+    DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS,
 };
 use serde::{Deserialize, Serialize};
@@ -33,6 +33,8 @@ pub struct EthConfig {
     ///
     /// Defaults to [`RPC_DEFAULT_GAS_CAP`]
     pub rpc_gas_cap: u64,
+    /// Max number of blocks for `eth_simulateV1`.
+    pub rpc_max_simulate_blocks: u64,
     ///
     /// Sets TTL for stale filters
     pub stale_filter_ttl: Duration,
@@ -62,6 +64,7 @@ impl Default for EthConfig {
             max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER,
             max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE,
             rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(),
+            rpc_max_simulate_blocks: DEFAULT_MAX_SIMULATE_BLOCKS,
             stale_filter_ttl: DEFAULT_STALE_FILTER_TTL,
             fee_history_cache: FeeHistoryCacheConfig::default(),
             proof_permits: DEFAULT_PROOF_PERMITS,
@@ -106,6 +109,12 @@ impl EthConfig {
         self
     }

+    /// Configures the maximum number of blocks for `eth_simulateV1`.
+    pub const fn rpc_max_simulate_blocks(mut self, max_blocks: u64) -> Self {
+        self.rpc_max_simulate_blocks = max_blocks;
+        self
+    }
+
     /// Configures the maximum proof window for historical proof generation.
     pub const fn eth_proof_window(mut self, window: u64) -> Self {
         self.eth_proof_window = window;
diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs
index 2437f99864..82f90ebc50 100644
--- a/crates/rpc/rpc-eth-types/src/cache/db.rs
+++ b/crates/rpc/rpc-eth-types/src/cache/db.rs
@@ -2,8 +2,8 @@
 //! in default implementation of
 //! `reth_rpc_eth_api::helpers::Call`.
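The widened `RpcBlock` alias and the new `RpcReceipt` alias project response types out of the network's associated types, so one RPC implementation can serve several networks. A reduced sketch of that adapter pattern with stand-in types (the trait and aliases below are simplified, not the alloy-network definitions):

```rust
/// Simplified stand-in for the network abstraction.
trait Network {
    type TransactionResponse;
    type HeaderResponse;
    type ReceiptResponse;
}

/// Simplified stand-in for the RPC block shape.
struct Block<T, H> {
    header: H,
    transactions: Vec<T>,
}

// Each alias picks its concrete shape from the network's associated types.
type RpcBlock<N> = Block<<N as Network>::TransactionResponse, <N as Network>::HeaderResponse>;
type RpcReceipt<N> = <N as Network>::ReceiptResponse;

struct Ethereum;
impl Network for Ethereum {
    type TransactionResponse = String; // stand-ins for the real response types
    type HeaderResponse = u64;
    type ReceiptResponse = Vec<u8>;
}

fn main() {
    let block: RpcBlock<Ethereum> = Block { header: 1, transactions: vec!["tx".into()] };
    let _receipt: RpcReceipt<Ethereum> = vec![0u8];
    assert_eq!(block.header, 1);
    assert_eq!(block.transactions.len(), 1);
}
```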
+use alloy_primitives::{Address, B256, U256}; use reth_errors::ProviderResult; -use reth_primitives::{Address, B256, U256}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_storage_api::StateProvider; use reth_trie::HashedStorage; @@ -27,11 +27,9 @@ impl<'a> reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'a fn state_root_from_nodes( &self, - nodes: reth_trie::updates::TrieUpdates, - hashed_state: reth_trie::HashedPostState, - prefix_sets: reth_trie::prefix_set::TriePrefixSetsMut, + input: reth_trie::TrieInput, ) -> reth_errors::ProviderResult { - self.0.state_root_from_nodes(nodes, hashed_state, prefix_sets) + self.0.state_root_from_nodes(input) } fn state_root_with_updates( @@ -43,11 +41,9 @@ impl<'a> reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'a fn state_root_from_nodes_with_updates( &self, - nodes: reth_trie::updates::TrieUpdates, - hashed_state: reth_trie::HashedPostState, - prefix_sets: reth_trie::prefix_set::TriePrefixSetsMut, + input: reth_trie::TrieInput, ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { - self.0.state_root_from_nodes_with_updates(nodes, hashed_state, prefix_sets) + self.0.state_root_from_nodes_with_updates(input) } } @@ -64,19 +60,27 @@ impl<'a> reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper< impl<'a> reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'a> { fn proof( &self, - hashed_state: reth_trie::HashedPostState, + input: reth_trie::TrieInput, address: revm_primitives::Address, slots: &[B256], ) -> reth_errors::ProviderResult { - self.0.proof(hashed_state, address, slots) + self.0.proof(input, address, slots) + } + + fn multiproof( + &self, + input: reth_trie::TrieInput, + targets: std::collections::HashMap>, + ) -> ProviderResult { + self.0.multiproof(input, targets) } fn witness( &self, - overlay: reth_trie::HashedPostState, + input: reth_trie::TrieInput, target: reth_trie::HashedPostState, - ) -> reth_errors::ProviderResult> { - self.0.witness(overlay, target) + ) -> reth_errors::ProviderResult> { + self.0.witness(input, target) } } @@ -92,15 +96,15 @@ impl<'a> reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'a> { impl<'a> reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'a> { fn block_hash( &self, - block_number: reth_primitives::BlockNumber, + block_number: alloy_primitives::BlockNumber, ) -> reth_errors::ProviderResult> { self.0.block_hash(block_number) } fn canonical_hashes_range( &self, - start: reth_primitives::BlockNumber, - end: reth_primitives::BlockNumber, + start: alloy_primitives::BlockNumber, + end: alloy_primitives::BlockNumber, ) -> reth_errors::ProviderResult> { self.0.canonical_hashes_range(start, end) } @@ -145,8 +149,8 @@ impl<'a> StateProvider for StateProviderTraitObjWrapper<'a> { fn storage( &self, account: revm_primitives::Address, - storage_key: reth_primitives::StorageKey, - ) -> reth_errors::ProviderResult> { + storage_key: alloy_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { self.0.storage(account, storage_key) } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 459a12d9b4..7976ba388a 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,13 +1,14 @@ //! 
Async caching support for eth RPC +use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; use reth_primitives::{ - BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, - SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, B256, + BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Header, Receipt, SealedBlock, + SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, }; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -111,7 +112,7 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
, { Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) } @@ -129,7 +130,7 @@ impl EthStateCache { where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
, { let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = config; @@ -332,7 +333,7 @@ impl EthStateCacheService, { fn on_new_block(&mut self, block_hash: B256, res: ProviderResult>) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -419,7 +420,7 @@ impl Future for EthStateCacheService, { type Output = (); diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index d52bd50398..0d2db251d9 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -2,20 +2,21 @@ use std::time::Duration; +use alloy_primitives::{Address, Bytes, U256}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes}; +use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; use reth_rpc_server_types::result::{ - internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, + block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; use reth_transaction_pool::error::{ - Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, - PoolTransactionError, + Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, + PoolError, PoolErrorKind, PoolTransactionError, }; -use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; +use revm::primitives::{EVMError, ExecutionResult, HaltReason, InvalidTransaction, OutOfGasError}; use revm_inspectors::tracing::MuxError; use tracing::error; @@ -37,18 +38,15 @@ pub enum EthApiError { /// Errors related to the transaction pool #[error(transparent)] PoolError(RpcPoolError), - /// When an unknown block number is encountered - #[error("unknown block number")] - UnknownBlockNumber, - /// Thrown when querying for `finalized` or `safe` block before the merge transition is - /// finalized, - /// - /// op-node now checks for either `Unknown block` OR `unknown block`: - /// - /// - /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. 
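The hunks below replace the catch-all `UnknownBlockNumber`/`UnknownSafeOrFinalizedBlock` variants with `HeaderNotFound(BlockId)` and render the offending block id into the message, which op-node string-matches. A self-contained sketch of a rendering consistent with the unit tests further down, using a simplified stand-in for `BlockId` (not the reth type):

```rust
/// Simplified stand-in for reth_primitives::BlockId.
enum BlockId {
    Hash { hash: [u8; 32], canonical: bool },
    Number(u64),
    Tag(&'static str), // "latest" | "safe" | "finalized" | ...
}

fn block_id_to_str(id: &BlockId) -> String {
    match id {
        BlockId::Hash { hash, canonical } => {
            let hex: String = hash.iter().map(|b| format!("{b:02x}")).collect();
            if *canonical {
                format!("canonical hash 0x{hex}")
            } else {
                format!("hash 0x{hex}")
            }
        }
        // numbers render as 0x-prefixed hex quantities, e.g. 100000 -> 0x186a0
        BlockId::Number(n) => format!("number {n:#x}"),
        BlockId::Tag(tag) => (*tag).to_string(),
    }
}

fn main() {
    assert_eq!(block_id_to_str(&BlockId::Number(100_000)), "number 0x186a0");
    assert_eq!(block_id_to_str(&BlockId::Tag("latest")), "latest");
    let h = BlockId::Hash { hash: [0x11; 32], canonical: false };
    assert!(block_id_to_str(&h).starts_with("hash 0x1111"));
}
```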
- #[error("unknown block")] - UnknownSafeOrFinalizedBlock, + /// Header not found for block hash/number/tag + #[error("header not found")] + HeaderNotFound(BlockId), + /// Header range not found for start block hash/number/tag to end block hash/number/tag + #[error("header range not found, start block {0:?}, end block {1:?}")] + HeaderRangeNotFound(BlockId, BlockId), + /// Receipts not found for block hash/number/tag + #[error("receipts not found")] + ReceiptsNotFound(BlockId), /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] UnknownBlockOrTxIndex, @@ -171,14 +169,29 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { EthApiError::EvmCustom(_) | EthApiError::EvmPrecompile(_) | EthApiError::InvalidRewardPercentiles => internal_rpc_err(error.to_string()), - EthApiError::UnknownBlockNumber | - EthApiError::UnknownBlockOrTxIndex | - EthApiError::UnknownParentBlock => { + EthApiError::UnknownBlockOrTxIndex | EthApiError::UnknownParentBlock => { rpc_error_with_code(EthRpcErrorCode::ResourceNotFound.code(), error.to_string()) } - EthApiError::UnknownSafeOrFinalizedBlock => { - rpc_error_with_code(EthRpcErrorCode::UnknownBlock.code(), error.to_string()) - } + // TODO(onbjerg): We rewrite the error message here because op-node does string matching + // on the error message. + // + // Until https://github.com/ethereum-optimism/optimism/pull/11759 is released, this must be kept around. + EthApiError::HeaderNotFound(id) => rpc_error_with_code( + EthRpcErrorCode::ResourceNotFound.code(), + format!("block not found: {}", block_id_to_str(id)), + ), + EthApiError::ReceiptsNotFound(id) => rpc_error_with_code( + EthRpcErrorCode::ResourceNotFound.code(), + format!("{error}: {}", block_id_to_str(id)), + ), + EthApiError::HeaderRangeNotFound(start_id, end_id) => rpc_error_with_code( + EthRpcErrorCode::ResourceNotFound.code(), + format!( + "{error}: start block: {}, end block: {}", + block_id_to_str(start_id), + block_id_to_str(end_id), + ), + ), EthApiError::Unsupported(msg) => internal_rpc_err(msg), EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), @@ -221,15 +234,15 @@ impl From for EthApiError { fn from(error: reth_errors::ProviderError) -> Self { use reth_errors::ProviderError; match error { - ProviderError::HeaderNotFound(_) | - ProviderError::BlockHashNotFound(_) | - ProviderError::BestBlockNotFound | - ProviderError::BlockNumberForTransactionIndexNotFound | - ProviderError::TotalDifficultyNotFound { .. 
} | - ProviderError::UnknownBlockHash(_) => Self::UnknownBlockNumber, - ProviderError::FinalizedBlockNotFound | ProviderError::SafeBlockNotFound => { - Self::UnknownSafeOrFinalizedBlock + ProviderError::HeaderNotFound(hash) => Self::HeaderNotFound(hash.into()), + ProviderError::BlockHashNotFound(hash) | ProviderError::UnknownBlockHash(hash) => { + Self::HeaderNotFound(hash.into()) } + ProviderError::BestBlockNotFound => Self::HeaderNotFound(BlockId::latest()), + ProviderError::BlockNumberForTransactionIndexNotFound => Self::UnknownBlockOrTxIndex, + ProviderError::TotalDifficultyNotFound(num) => Self::HeaderNotFound(num.into()), + ProviderError::FinalizedBlockNotFound => Self::HeaderNotFound(BlockId::finalized()), + ProviderError::SafeBlockNotFound => Self::HeaderNotFound(BlockId::safe()), err => Self::Internal(err.into()), } } @@ -241,7 +254,12 @@ where { fn from(err: EVMError) -> Self { match err { - EVMError::Transaction(err) => RpcInvalidTransactionError::from(err).into(), + EVMError::Transaction(invalid_tx) => match invalid_tx { + InvalidTransaction::NonceTooLow { tx, state } => { + Self::InvalidTransaction(RpcInvalidTransactionError::NonceTooLow { tx, state }) + } + _ => RpcInvalidTransactionError::from(invalid_tx).into(), + }, EVMError::Header(InvalidHeader::PrevrandaoNotSet) => Self::PrevrandaoNotSet, EVMError::Header(InvalidHeader::ExcessBlobGasNotSet) => Self::ExcessBlobGasNotSet, EVMError::Database(err) => err.into(), @@ -268,8 +286,13 @@ where #[derive(thiserror::Error, Debug)] pub enum RpcInvalidTransactionError { /// returned if the nonce of a transaction is lower than the one present in the local chain. - #[error("nonce too low")] - NonceTooLow, + #[error("nonce too low: next nonce {state}, tx nonce {tx}")] + NonceTooLow { + /// The nonce of the transaction. + tx: u64, + /// The current state of the nonce in the local chain. + state: u64, + }, /// returned if the nonce of a transaction is higher than the next one expected based on the /// local chain. #[error("nonce too high")] @@ -284,9 +307,14 @@ pub enum RpcInvalidTransactionError { /// thrown if creation transaction provides the init code bigger than init code size limit. #[error("max initcode size exceeded")] MaxInitCodeSizeExceeded, - /// Represents the inability to cover max cost + value (account balance too low). - #[error("insufficient funds for gas * price + value")] - InsufficientFunds, + /// Represents the inability to cover max fee + value (account balance too low). + #[error("insufficient funds for gas * price + value: have {balance} want {cost}")] + InsufficientFunds { + /// Transaction cost. + cost: U256, + /// Current balance of transaction sender. + balance: U256, + }, /// Thrown when calculating gas usage #[error("gas uint64 overflow")] GasUintOverflow, @@ -397,7 +425,7 @@ impl RpcInvalidTransactionError { impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. - const fn error_code(&self) -> i32 { + pub const fn error_code(&self) -> i32 { match self { Self::InvalidChainId | Self::GasTooLow | Self::GasTooHigh => { EthRpcErrorCode::InvalidInput.code() @@ -456,12 +484,14 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::CallerGasLimitMoreThanBlock | InvalidTransaction::CallGasCostMoreThanGasLimit => Self::GasTooHigh, InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA, - InvalidTransaction::LackOfFundForMaxFee { .. 
} => Self::InsufficientFunds, + InvalidTransaction::LackOfFundForMaxFee { fee, balance } => { + Self::InsufficientFunds { cost: *fee, balance: *balance } + } InvalidTransaction::OverflowPaymentInTransaction => Self::GasUintOverflow, InvalidTransaction::NonceOverflowInTransaction => Self::NonceMaxValue, InvalidTransaction::CreateInitCodeSizeLimit => Self::MaxInitCodeSizeExceeded, InvalidTransaction::NonceTooHigh { .. } => Self::NonceTooHigh, - InvalidTransaction::NonceTooLow { .. } => Self::NonceTooLow, + InvalidTransaction::NonceTooLow { tx, state } => Self::NonceTooLow { tx, state }, InvalidTransaction::AccessListNotSupported => Self::AccessListNotSupported, InvalidTransaction::MaxFeePerBlobGasNotSupported => Self::MaxFeePerBlobGasNotSupported, InvalidTransaction::BlobVersionedHashesNotSupported => { @@ -498,8 +528,12 @@ impl From for RpcInvalidTransactionErr // This conversion is used to convert any transaction errors that could occur inside the // txpool (e.g. `eth_sendRawTransaction`) to their corresponding RPC match err { - InvalidTransactionError::InsufficientFunds { .. } => Self::InsufficientFunds, - InvalidTransactionError::NonceNotConsistent => Self::NonceTooLow, + InvalidTransactionError::InsufficientFunds(res) => { + Self::InsufficientFunds { cost: res.expected, balance: res.got } + } + InvalidTransactionError::NonceNotConsistent { tx, state } => { + Self::NonceTooLow { tx, state } + } InvalidTransactionError::OldLegacyChainId => { // Note: this should be unreachable since Spurious Dragon now enabled Self::OldLegacyChainId @@ -545,7 +579,8 @@ impl RevertError { } } - const fn error_code(&self) -> i32 { + /// Returns error code to return for this error. + pub const fn error_code(&self) -> i32 { EthRpcErrorCode::ExecutionError.code() } } @@ -598,9 +633,12 @@ pub enum RpcPoolError { /// Custom pool error #[error(transparent)] PoolTransactionError(Box), - /// Eip-4844 related error + /// EIP-4844 related error #[error(transparent)] Eip4844(#[from] Eip4844PoolTransactionError), + /// EIP-7702 related error + #[error(transparent)] + Eip7702(#[from] Eip7702PoolTransactionError), /// Thrown if a conflicting transaction type is already in the pool /// /// In other words, thrown if a transaction with the same sender that violates the exclusivity @@ -652,8 +690,9 @@ impl From for RpcPoolError { InvalidPoolTransactionError::Underpriced => Self::Underpriced, InvalidPoolTransactionError::Other(err) => Self::PoolTransactionError(err), InvalidPoolTransactionError::Eip4844(err) => Self::Eip4844(err), - InvalidPoolTransactionError::Overdraft => { - Self::Invalid(RpcInvalidTransactionError::InsufficientFunds) + InvalidPoolTransactionError::Eip7702(err) => Self::Eip7702(err), + InvalidPoolTransactionError::Overdraft { cost, balance } => { + Self::Invalid(RpcInvalidTransactionError::InsufficientFunds { cost, balance }) } } } @@ -701,6 +740,8 @@ pub fn ensure_success(result: ExecutionResult) -> EthResult { #[cfg(test)] mod tests { + use revm_primitives::b256; + use super::*; #[test] @@ -708,4 +749,32 @@ mod tests { let err = EthApiError::ExecutionTimedOut(Duration::from_secs(10)); assert_eq!(err.to_string(), "execution aborted (timeout = 10s)"); } + + #[test] + fn header_not_found_message() { + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::hash(b256!( + "1a15e3c30cf094a99826869517b16d185d45831d3a494f01030b0001a9d3ebb9" + ))) + .into(); + assert_eq!(err.message(), "block not found: hash 
0x1a15e3c30cf094a99826869517b16d185d45831d3a494f01030b0001a9d3ebb9"); + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::hash_canonical(b256!( + "1a15e3c30cf094a99826869517b16d185d45831d3a494f01030b0001a9d3ebb9" + ))) + .into(); + assert_eq!(err.message(), "block not found: canonical hash 0x1a15e3c30cf094a99826869517b16d185d45831d3a494f01030b0001a9d3ebb9"); + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::number(100000)).into(); + assert_eq!(err.message(), "block not found: number 0x186a0"); + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::latest()).into(); + assert_eq!(err.message(), "block not found: latest"); + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::safe()).into(); + assert_eq!(err.message(), "block not found: safe"); + let err: jsonrpsee_types::error::ErrorObject<'static> = + EthApiError::HeaderNotFound(BlockId::finalized()).into(); + assert_eq!(err.message(), "block not found: finalized"); + } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 94e52eb959..17e1fea8aa 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -6,6 +6,7 @@ use std::{ sync::{atomic::Ordering::SeqCst, Arc}, }; +use alloy_primitives::B256; use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -16,7 +17,7 @@ use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_primitives::{ basefee::calc_next_block_base_fee, eip4844::{calc_blob_gasprice, calculate_excess_blob_gas}, - Receipt, SealedBlock, TransactionSigned, B256, + Receipt, SealedBlock, TransactionSigned, }; use reth_rpc_types::TxGasAndReward; use reth_storage_api::BlockReaderIdExt; diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 5075a6d4a7..b5f9ca58d0 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -3,9 +3,11 @@ use std::fmt::{self, Debug, Formatter}; +use alloy_primitives::{B256, U256}; use derive_more::{Deref, DerefMut, From, Into}; -use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag, B256, U256}; +use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag}; use reth_rpc_server_types::constants; +use reth_rpc_types::BlockId; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; @@ -118,7 +120,7 @@ where let header = self .provider .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest)? - .ok_or(EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?; let mut inner = self.inner.lock().await; @@ -153,7 +155,7 @@ where let (parent_hash, block_values) = self .get_block_values(current_hash, SAMPLE_NUMBER) .await? 
- .ok_or(EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(current_hash.into()))?; inner .lowest_effective_tip_cache .insert(current_hash, (parent_hash, block_values.clone())); diff --git a/crates/rpc/rpc-eth-types/src/id_provider.rs b/crates/rpc/rpc-eth-types/src/id_provider.rs index 642d87578f..a2020d0b21 100644 --- a/crates/rpc/rpc-eth-types/src/id_provider.rs +++ b/crates/rpc/rpc-eth-types/src/id_provider.rs @@ -46,7 +46,7 @@ fn to_quantity(val: u128) -> SubscriptionId<'static> { #[cfg(test)] mod tests { use super::*; - use reth_primitives::U128; + use alloy_primitives::U128; #[test] fn test_id_provider_quantity() { diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 1897a9fd70..fba893c15f 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -18,6 +18,7 @@ pub mod logs_utils; pub mod pending_block; pub mod receipt; pub mod revm_utils; +pub mod simulate; pub mod transaction; pub mod utils; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index a08db4ce48..d332a04459 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,9 +2,10 @@ //! //! Log parsing for building filter. +use alloy_primitives::TxHash; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt, TxHash}; +use reth_primitives::{BlockNumHash, Receipt}; use reth_rpc_server_types::result::rpc_error_with_code; use reth_rpc_types::{FilterId, FilteredParams, Log}; use reth_storage_api::BlockReader; @@ -167,7 +168,7 @@ pub fn append_matching_block_logs( /// Returns true if the log matches the filter and should be included pub fn log_matches_filter( block: BlockNumHash, - log: &reth_primitives::Log, + log: &alloy_primitives::Log, params: &FilteredParams, ) -> bool { if params.filter.is_some() && diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 8a9503d81e..949e205dcf 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,10 +4,9 @@ use std::time::Instant; +use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{ - BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader, B256, -}; +use reth_primitives::{BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 34b10fd1c2..63d148d441 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,6 +1,7 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. -use reth_primitives::{Address, Receipt, TransactionMeta, TransactionSigned, TxKind}; +use alloy_primitives::{Address, TxKind}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; use reth_rpc_types::{ AnyReceiptEnvelope, AnyTransactionReceipt, Log, OtherFields, ReceiptWithBloom, TransactionReceipt, WithOtherFields, @@ -13,9 +14,9 @@ use super::{EthApiError, EthResult}; #[derive(Debug)] pub struct ReceiptBuilder { /// The base response body, contains L1 fields. - base: TransactionReceipt>, + pub base: TransactionReceipt>, /// Additional L2 fields. 
- other: OtherFields, + pub other: OtherFields, } impl ReceiptBuilder { diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 52ae7ee0a1..0e1cb9d7da 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,6 +1,6 @@ //! utilities for working with revm -use reth_primitives::{Address, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use reth_rpc_types::{ state::{AccountOverride, StateOverride}, BlockOverrides, @@ -47,16 +47,19 @@ where DB: Database, EthApiError: From<::Error>, { - Ok(db - // Get the caller account. - .basic(env.caller)? - // Get the caller balance. - .map(|acc| acc.balance) - .unwrap_or_default() - // Subtract transferred value from the caller balance. + // Get the caller account. + let caller = db.basic(env.caller)?; + // Get the caller balance. + let balance = caller.map(|acc| acc.balance).unwrap_or_default(); + // Get transaction value. + let value = env.value; + // Subtract transferred value from the caller balance. Return error if the caller has + // insufficient funds. + let balance = balance .checked_sub(env.value) - // Return error if the caller has insufficient funds. - .ok_or_else(|| RpcInvalidTransactionError::InsufficientFunds)? + .ok_or_else(|| RpcInvalidTransactionError::InsufficientFunds { cost: value, balance })?; + + Ok(balance // Calculate the amount of gas the caller can afford with the specified gas price. .checked_div(env.gas_price) // This will be 0 if gas price is 0. It is fine, because we check it before. @@ -133,6 +136,7 @@ impl CallFees { RpcInvalidTransactionError::TipAboveFeeCap.into(), ) } + // ref Ok(min( max_fee, block_base_fee.checked_add(max_priority_fee_per_gas).ok_or_else(|| { @@ -142,7 +146,7 @@ impl CallFees { } None => Ok(block_base_fee .checked_add(max_priority_fee_per_gas.unwrap_or(U256::ZERO)) - .ok_or_else(|| EthApiError::from(RpcInvalidTransactionError::TipVeryHigh))?), + .ok_or(EthApiError::from(RpcInvalidTransactionError::TipVeryHigh))?), } } @@ -202,8 +206,12 @@ impl CallFees { } } -/// Applies the given block overrides to the env -pub fn apply_block_overrides(overrides: BlockOverrides, env: &mut BlockEnv) { +/// Applies the given block overrides to the env and updates overridden block hashes in the db. +pub fn apply_block_overrides( + overrides: BlockOverrides, + db: &mut CacheDB, + env: &mut BlockEnv, +) { let BlockOverrides { number, difficulty, @@ -212,9 +220,14 @@ pub fn apply_block_overrides(overrides: BlockOverrides, env: &mut BlockEnv) { coinbase, random, base_fee, - block_hash: _, + block_hash, } = overrides; + if let Some(block_hashes) = block_hash { + // override block hashes + db.block_hashes.extend(block_hashes.into_iter().map(|(num, hash)| (U256::from(num), hash))) + } + if let Some(number) = number { env.number = number; } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs new file mode 100644 index 0000000000..c36f77599a --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -0,0 +1,302 @@ +//! 
Utilities for serving `eth_simulateV1`

+use alloy_consensus::{TxEip4844Variant, TxType, TypedTransaction};
+use jsonrpsee_types::ErrorObject;
+use reth_primitives::{
+    logs_bloom,
+    proofs::{calculate_receipt_root, calculate_transaction_root},
+    BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, TransactionSignedNoHash,
+};
+use reth_revm::database::StateProviderDatabase;
+use reth_rpc_server_types::result::rpc_err;
+use reth_rpc_types::{
+    simulate::{SimCallResult, SimulateError, SimulatedBlock},
+    Block, BlockTransactionsKind, ToRpcError, TransactionRequest, WithOtherFields,
+};
+use reth_rpc_types_compat::block::from_block;
+use reth_storage_api::StateRootProvider;
+use reth_trie::{HashedPostState, HashedStorage};
+use revm::{db::CacheDB, Database};
+use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256};
+
+use crate::{
+    cache::db::StateProviderTraitObjWrapper, EthApiError, RevertError, RpcInvalidTransactionError,
+};
+
+/// Errors which may occur during `eth_simulateV1` execution.
+#[derive(Debug, thiserror::Error)]
+pub enum EthSimulateError {
+    /// Total gas limit of transactions for the block exceeds the block gas limit.
+    #[error("Block gas limit exceeded by the block's transactions")]
+    BlockGasLimitExceeded,
+    /// Max gas limit for entire operation exceeded.
+    #[error("Client adjustable limit reached")]
+    GasLimitReached,
+}
+
+impl EthSimulateError {
+    const fn error_code(&self) -> i32 {
+        match self {
+            Self::BlockGasLimitExceeded => -38015,
+            Self::GasLimitReached => -38026,
+        }
+    }
+}
+
+impl ToRpcError for EthSimulateError {
+    fn to_rpc_error(&self) -> ErrorObject<'static> {
+        rpc_err(self.error_code(), self.to_string(), None)
+    }
+}
+
+/// Goes over the list of [`TransactionRequest`]s and populates missing fields trying to resolve
+/// them into [`TransactionSigned`].
+///
+/// If validation is enabled, the function will return an error if any of the transactions can't
+/// be built right away.
+pub fn resolve_transactions<DB: Database>(
+    txs: &mut [TransactionRequest],
+    validation: bool,
+    block_gas_limit: u128,
+    chain_id: u64,
+    db: &mut DB,
+) -> Result<Vec<TransactionSigned>, EthApiError>
+where
+    EthApiError: From<DB::Error>,
+{
+    let mut transactions = Vec::with_capacity(txs.len());
+
+    let default_gas_limit = {
+        let total_specified_gas = txs.iter().filter_map(|tx| tx.gas).sum::<u128>();
+        let txs_without_gas_limit = txs.iter().filter(|tx| tx.gas.is_none()).count();
+
+        if total_specified_gas > block_gas_limit {
+            return Err(EthApiError::Other(Box::new(EthSimulateError::BlockGasLimitExceeded)))
+        }
+
+        if txs_without_gas_limit > 0 {
+            (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u128
+        } else {
+            0
+        }
+    };
+
+    for tx in txs {
+        if tx.buildable_type().is_none() && validation {
+            return Err(EthApiError::TransactionConversionError);
+        }
+        // If we're missing any fields and validation is disabled, we try filling nonce, gas and
+        // gas price.
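`resolve_transactions` above spreads the remaining block gas evenly across calls that omit a gas limit, after rejecting blocks whose explicit limits already exceed the cap. The same arithmetic in isolation (a sketch; the function name and error string are illustrative):

```rust
/// Default gas for calls that omit a gas limit in `eth_simulateV1`:
/// the gas left after all explicit limits, split evenly.
fn default_gas_limit(
    block_gas_limit: u128,
    specified: &[Option<u128>],
) -> Result<u128, &'static str> {
    let total_specified: u128 = specified.iter().flatten().sum();
    if total_specified > block_gas_limit {
        return Err("block gas limit exceeded by the block's transactions");
    }
    let without_limit = specified.iter().filter(|g| g.is_none()).count() as u128;
    Ok(if without_limit > 0 { (block_gas_limit - total_specified) / without_limit } else { 0 })
}

fn main() {
    // 30M block, one call pins 10M, two calls left unspecified -> 10M each.
    assert_eq!(default_gas_limit(30_000_000, &[Some(10_000_000), None, None]), Ok(10_000_000));
    assert!(default_gas_limit(30_000_000, &[Some(40_000_000)]).is_err());
}
```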
+ let tx_type = tx.preferred_type(); + + let from = if let Some(from) = tx.from { + from + } else { + tx.from = Some(Address::ZERO); + Address::ZERO + }; + + if tx.nonce.is_none() { + tx.nonce = Some(db.basic(from)?.map(|acc| acc.nonce).unwrap_or_default()); + } + + if tx.gas.is_none() { + tx.gas = Some(default_gas_limit); + } + + if tx.chain_id.is_none() { + tx.chain_id = Some(chain_id); + } + + if tx.to.is_none() { + tx.to = Some(TxKind::Create); + } + + match tx_type { + TxType::Legacy | TxType::Eip2930 => { + if tx.gas_price.is_none() { + tx.gas_price = Some(0); + } + } + _ => { + if tx.max_fee_per_gas.is_none() { + tx.max_fee_per_gas = Some(0); + tx.max_priority_fee_per_gas = Some(0); + } + } + } + + let Ok(tx) = tx.clone().build_typed_tx() else { + return Err(EthApiError::TransactionConversionError) + }; + + // Create an empty signature for the transaction. + let signature = + Signature { odd_y_parity: false, r: Default::default(), s: Default::default() }; + + let tx = match tx { + TypedTransaction::Legacy(tx) => { + TransactionSignedNoHash { transaction: Transaction::Legacy(tx), signature } + .with_hash() + } + TypedTransaction::Eip2930(tx) => { + TransactionSignedNoHash { transaction: Transaction::Eip2930(tx), signature } + .with_hash() + } + TypedTransaction::Eip1559(tx) => { + TransactionSignedNoHash { transaction: Transaction::Eip1559(tx), signature } + .with_hash() + } + TypedTransaction::Eip4844(tx) => { + let tx = match tx { + TxEip4844Variant::TxEip4844(tx) => tx, + TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx, + }; + TransactionSignedNoHash { transaction: Transaction::Eip4844(tx), signature } + .with_hash() + } + TypedTransaction::Eip7702(tx) => { + TransactionSignedNoHash { transaction: Transaction::Eip7702(tx), signature } + .with_hash() + } + }; + + transactions.push(tx); + } + + Ok(transactions) +} + +/// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. +pub fn build_block( + results: Vec<(Address, ExecutionResult)>, + transactions: Vec, + block_env: &BlockEnv, + parent_hash: B256, + total_difficulty: U256, + full_transactions: bool, + db: &CacheDB>>, +) -> Result>>, EthApiError> { + let mut calls: Vec = Vec::with_capacity(results.len()); + let mut senders = Vec::with_capacity(results.len()); + let mut receipts = Vec::new(); + + let mut log_index = 0; + for (transaction_index, ((sender, result), tx)) in + results.into_iter().zip(transactions.iter()).enumerate() + { + senders.push(sender); + + let call = match result { + ExecutionResult::Halt { reason, gas_used } => { + let error = RpcInvalidTransactionError::halt(reason, tx.gas_limit()); + SimCallResult { + return_value: Bytes::new(), + error: Some(SimulateError { + code: error.error_code(), + message: error.to_string(), + }), + gas_used, + logs: Vec::new(), + status: false, + } + } + ExecutionResult::Revert { output, gas_used } => { + let error = RevertError::new(output.clone()); + SimCallResult { + return_value: output, + error: Some(SimulateError { + code: error.error_code(), + message: error.to_string(), + }), + gas_used, + status: false, + logs: Vec::new(), + } + } + ExecutionResult::Success { output, gas_used, logs, .. 
} => SimCallResult { + return_value: output.into_data(), + error: None, + gas_used, + logs: logs + .into_iter() + .map(|log| { + log_index += 1; + reth_rpc_types::Log { + inner: log, + log_index: Some(log_index - 1), + transaction_index: Some(transaction_index as u64), + transaction_hash: Some(tx.hash()), + block_number: Some(block_env.number.to()), + block_timestamp: Some(block_env.timestamp.to()), + ..Default::default() + } + }) + .collect(), + status: true, + }, + }; + + receipts.push( + #[allow(clippy::needless_update)] + Receipt { + tx_type: tx.tx_type(), + success: call.status, + cumulative_gas_used: call.gas_used + calls.iter().map(|c| c.gas_used).sum::(), + logs: call.logs.iter().map(|log| &log.inner).cloned().collect(), + ..Default::default() + } + .into(), + ); + + calls.push(call); + } + + let mut hashed_state = HashedPostState::default(); + for (address, account) in &db.accounts { + let hashed_address = keccak256(address); + hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); + + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); + + for (slot, value) in &account.storage { + let slot = B256::from(*slot); + let hashed_slot = keccak256(slot); + storage.storage.insert(hashed_slot, *value); + } + } + + let state_root = db.db.0.state_root(hashed_state)?; + + let header = reth_primitives::Header { + beneficiary: block_env.coinbase, + difficulty: block_env.difficulty, + number: block_env.number.to(), + timestamp: block_env.timestamp.to(), + base_fee_per_gas: Some(block_env.basefee.to()), + gas_limit: block_env.gas_limit.to(), + gas_used: calls.iter().map(|c| c.gas_used).sum::(), + blob_gas_used: Some(0), + parent_hash, + receipts_root: calculate_receipt_root(&receipts), + transactions_root: calculate_transaction_root(&transactions), + state_root, + logs_bloom: logs_bloom(receipts.iter().flat_map(|r| r.receipt.logs.iter())), + mix_hash: block_env.prevrandao.unwrap_or_default(), + ..Default::default() + }; + + let block = BlockWithSenders { + block: reth_primitives::Block { header, body: transactions, ..Default::default() }, + senders, + }; + + let txs_kind = + if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; + + let block = from_block(block, total_difficulty, txs_kind, None)?; + Ok(SimulatedBlock { inner: block, calls }) +} diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 89ff584e44..0ea3710828 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -1,7 +1,8 @@ //! Helper types for `reth_rpc_eth_api::EthApiServer` implementation. //! //! Transaction wrapper that labels transaction with its origin. 
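Editor's note: the `resolve_transactions` helper above fills in a default gas limit by splitting the block's unreserved gas evenly across the transactions that did not specify one, and rejects the batch outright if the explicit limits already exceed the block gas limit. A minimal standalone sketch of that rule (hypothetical helper and `main`, not part of the diff); the `transaction.rs` hunk then continues below.

```rust
/// Splits the gas left after explicitly-specified limits evenly across the
/// transactions that did not set one (mirrors `resolve_transactions` above).
/// Returns `None` where the real code returns `BlockGasLimitExceeded` (-38015).
fn default_gas_limit(block_gas_limit: u128, gas_fields: &[Option<u128>]) -> Option<u128> {
    let total_specified: u128 = gas_fields.iter().copied().flatten().sum();
    let unspecified = gas_fields.iter().filter(|g| g.is_none()).count() as u128;

    if total_specified > block_gas_limit {
        return None;
    }
    Some(if unspecified > 0 { (block_gas_limit - total_specified) / unspecified } else { 0 })
}

fn main() {
    // 30M block gas, one tx reserves 10M, two txs leave gas unset:
    // each unset tx receives (30M - 10M) / 2 = 10M.
    let split = default_gas_limit(30_000_000, &[Some(10_000_000), None, None]);
    assert_eq!(split, Some(10_000_000));
}
```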
-use reth_primitives::{TransactionSignedEcRecovered, B256}; +use alloy_primitives::B256; +use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types::{Transaction, TransactionInfo, WithOtherFields}; use reth_rpc_types_compat::transaction::from_recovered_with_block_context; @@ -40,16 +41,7 @@ impl TransactionSource { match self { Self::Pool(tx) => { let hash = tx.hash(); - ( - tx, - TransactionInfo { - hash: Some(hash), - index: None, - block_hash: None, - block_number: None, - base_fee: None, - }, - ) + (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } Self::Block { transaction, index, block_hash, block_number, base_fee } => { let hash = transaction.hash(); diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index ca2901ba86..bb7c3d6484 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,6 +1,7 @@ //! Commonly used code snippets -use reth_primitives::{Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +use alloy_primitives::Bytes; +use reth_primitives::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; use std::future::Future; use super::{EthApiError, EthResult}; diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index 69500df288..ec8dcb8229 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } +alloy-rpc-types-engine = { workspace = true, features = ["jwt", "serde"] } http.workspace = true jsonrpsee-http-client.workspace = true diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index 628654ebaf..e908af0af7 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -17,7 +17,6 @@ reth-network-api.workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true - # ethereum alloy-primitives.workspace = true diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index e433bda0d4..0bc4418193 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -45,11 +45,15 @@ pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = r"\\.\pipe\reth_engine_api.ipc #[cfg(not(windows))] pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = "/tmp/reth_engine_api.ipc"; +/// The default limit for blocks count in `eth_simulateV1`. +pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256; + /// The default eth historical proof window. pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; -/// Maximum eth historical proof window. Equivalent to roughly one month of data. -pub const MAX_ETH_PROOF_WINDOW: u64 = 216_000; +/// Maximum eth historical proof window. Equivalent to roughly one and a half months of data on a 12 +/// second block time, and a week on a 2 second block time. +pub const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2; /// GPO specific constants pub mod gas_oracle { @@ -81,8 +85,6 @@ pub mod gas_oracle { /// for more complex calls. pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; - /// Gas per transaction not creating a contract. 
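Editor's note: a quick arithmetic check of the `MAX_ETH_PROOF_WINDOW` change above (standalone sketch; the constant name mirrors the diff). The new value is one week of 2-second blocks, and that same block count at a 12-second block time spans 42 days, i.e. roughly a month and a half, which matches the updated doc comment. The constants hunk concludes immediately below.

```rust
fn main() {
    // A week of 2-second blocks, as in the updated constant:
    const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2;
    assert_eq!(MAX_ETH_PROOF_WINDOW, 302_400);

    // The same number of blocks at a 12-second block time covers 42 days:
    let days = MAX_ETH_PROOF_WINDOW * 12 / 86_400;
    assert_eq!(days, 42);
}
```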
-    pub const MIN_TRANSACTION_GAS: u64 = 21_000u64;
     /// Allowed error ratio for gas estimation
     /// Taken from Geth's implementation in order to pass the hive tests
     ///
diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs
index 252c78f241..3dc76f0d8f 100644
--- a/crates/rpc/rpc-server-types/src/result.rs
+++ b/crates/rpc/rpc-server-types/src/result.rs
@@ -1,16 +1,18 @@
 //! Additional helpers for converting errors.
 
-use std::fmt::Display;
+use std::fmt;
 
 use jsonrpsee_core::RpcResult;
+use reth_primitives::BlockId;
 use reth_rpc_types::engine::PayloadError;
 
 /// Helper trait to easily convert various `Result` types into [`RpcResult`]
 pub trait ToRpcResult<Ok, Err>: Sized {
-    /// Converts the error of the [Result] to an [`RpcResult`] via the `Err` [Display] impl.
+    /// Converts result to [`RpcResult`] by converting error variant to
+    /// [`jsonrpsee_types::error::ErrorObject`]
     fn to_rpc_result(self) -> RpcResult<Ok>
     where
-        Err: Display,
+        Err: fmt::Display,
     {
         self.map_internal_err(|err| err.to_string())
     }
@@ -142,12 +144,27 @@ pub fn rpc_err(
         code,
         msg.into(),
         data.map(|data| {
-            jsonrpsee_core::to_json_raw_value(&reth_primitives::hex::encode_prefixed(data))
+            jsonrpsee_core::to_json_raw_value(&alloy_primitives::hex::encode_prefixed(data))
                 .expect("serializing String can't fail")
         }),
     )
 }
 
+/// Formats a [`BlockId`] into an error message.
+pub fn block_id_to_str(id: BlockId) -> String {
+    match id {
+        BlockId::Hash(h) => {
+            if h.require_canonical == Some(true) {
+                format!("canonical hash {}", h.block_hash)
+            } else {
+                format!("hash {}", h.block_hash)
+            }
+        }
+        BlockId::Number(n) if n.is_number() => format!("number {n}"),
+        BlockId::Number(n) => format!("{n}"),
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml
index 19cef0b02d..59672595d5 100644
--- a/crates/rpc/rpc-testing-util/Cargo.toml
+++ b/crates/rpc/rpc-testing-util/Cargo.toml
@@ -17,6 +17,9 @@
 reth-primitives.workspace = true
 reth-rpc-types.workspace = true
 reth-rpc-api = { workspace = true, features = ["client"] }
 
+# ethereum
+alloy-primitives.workspace = true
+
 # async
 futures.workspace = true
diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs
index 5bb003dac8..0ec5311fe9 100644
--- a/crates/rpc/rpc-testing-util/src/debug.rs
+++ b/crates/rpc/rpc-testing-util/src/debug.rs
@@ -6,9 +6,10 @@ use std::{
     task::{Context, Poll},
 };
 
+use alloy_primitives::{TxHash, B256};
 use futures::{Stream, StreamExt};
 use jsonrpsee::core::client::Error as RpcError;
-use reth_primitives::{BlockId, TxHash, B256};
+use reth_primitives::{BlockId, Receipt};
 use reth_rpc_api::{clients::DebugApiClient, EthApiClient};
 use reth_rpc_types::{
     trace::{
@@ -77,7 +78,7 @@ pub trait DebugApiExt {
 
 impl<T> DebugApiExt for T
 where
-    T: EthApiClient<Transaction, Block> + DebugApiClient + Sync,
+    T: EthApiClient<Transaction, Block, Receipt> + DebugApiClient + Sync,
 {
     type Provider = T;
diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs
index db8933c0ad..5a1843935e 100644
--- a/crates/rpc/rpc-testing-util/src/trace.rs
+++ b/crates/rpc/rpc-testing-util/src/trace.rs
@@ -1,8 +1,9 @@
 //! Helpers for testing trace calls.
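Editor's note: a usage sketch for the `block_id_to_str` helper added above, assuming `alloy-eips` and `alloy-primitives` are on the dependency list (the `BlockId` used by `reth_primitives` is a re-export of `alloy_eips::BlockId`). The output strings follow the `match` arms in the hunk. The `trace.rs` hunk continues below.

```rust
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::B256;
use reth_rpc_server_types::result::block_id_to_str;

fn main() {
    // Tags render via their Display impl, e.g. "safe" or "latest".
    println!("{}", block_id_to_str(BlockId::Number(BlockNumberOrTag::Safe)));
    // Plain numbers render as "number <n>".
    println!("{}", block_id_to_str(BlockId::Number(BlockNumberOrTag::Number(100))));
    // Hash ids render as "hash 0x…" (or "canonical hash 0x…" when required).
    println!("{}", block_id_to_str(B256::ZERO.into()));
}
```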
+use alloy_primitives::{Bytes, TxHash, B256}; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::{BlockId, Bytes, TxHash, B256}; +use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use reth_rpc_types::{ trace::{ @@ -545,7 +546,6 @@ mod tests { let mut stream = client.replay_transactions(transactions, trace_types); let mut successes = 0; let mut failures = 0; - let mut all_results = Vec::new(); assert_is_stream(&stream); @@ -554,12 +554,10 @@ mod tests { Ok((trace_result, tx_hash)) => { println!("Success for tx_hash {tx_hash:?}: {trace_result:?}"); successes += 1; - all_results.push(Ok((trace_result, tx_hash))); } Err((error, tx_hash)) => { println!("Error for tx_hash {tx_hash:?}: {error:?}"); failures += 1; - all_results.push(Err((error, tx_hash))); } } } @@ -656,7 +654,6 @@ mod tests { let mut stream = client.trace_call_stream(trace_call_request); let mut successes = 0; let mut failures = 0; - let mut all_results = Vec::new(); assert_is_stream(&stream); @@ -665,12 +662,10 @@ mod tests { Ok(trace_result) => { println!("Success: {trace_result:?}"); successes += 1; - all_results.push(Ok(trace_result)); } Err((error, request)) => { println!("Error for request {request:?}: {error:?}"); failures += 1; - all_results.push(Err((error, request))); } } } diff --git a/crates/rpc/rpc-testing-util/tests/it/main.rs b/crates/rpc/rpc-testing-util/tests/it/main.rs index 4f65af8f95..4e754ffcc2 100644 --- a/crates/rpc/rpc-testing-util/tests/it/main.rs +++ b/crates/rpc/rpc-testing-util/tests/it/main.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] mod trace; const fn main() {} diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 619c1ed433..184fc0f3e7 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,14 +1,16 @@ -use std::{collections::HashSet, time::Instant}; +//! Integration tests for the trace API. use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee_http_client::HttpClient; +use reth_primitives::Receipt; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; use reth_rpc_eth_api::EthApiClient; use reth_rpc_types::{ trace::{filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest}, Block, Transaction, }; +use std::{collections::HashSet, time::Instant}; /// This is intended to be run locally against a running node. 
///
@@ -110,7 +112,7 @@ async fn debug_trace_block_entire_chain() {
     let client = HttpClientBuilder::default().build(url).unwrap();
 
     let current_block: u64 =
-        <HttpClient as EthApiClient<Transaction, Block>>::block_number(&client)
+        <HttpClient as EthApiClient<Transaction, Block, Receipt>>::block_number(&client)
             .await
             .unwrap()
             .try_into()
diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml
index a589ef418c..23304c51bd 100644
--- a/crates/rpc/rpc-types-compat/Cargo.toml
+++ b/crates/rpc/rpc-types-compat/Cargo.toml
@@ -12,9 +12,13 @@ description = "Compatibility layer for reth-primitives and ethereum RPC types"
 workspace = true
 
 [dependencies]
+# reth
 reth-primitives.workspace = true
 reth-rpc-types.workspace = true
 reth-trie-common.workspace = true
+
+# ethereum
+alloy-primitives.workspace = true
 alloy-rlp.workspace = true
 alloy-rpc-types.workspace = true
diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs
index 3b3c22c83b..1981c0555c 100644
--- a/crates/rpc/rpc-types-compat/src/block.rs
+++ b/crates/rpc/rpc-types-compat/src/block.rs
@@ -1,10 +1,11 @@
 //! Compatibility functions for rpc `Block` type.
 
 use crate::transaction::from_recovered_with_block_context;
+use alloy_primitives::{B256, U256};
 use alloy_rlp::Encodable;
 use alloy_rpc_types::{Transaction, TransactionInfo};
 use reth_primitives::{
-    Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, Withdrawals, B256, U256,
+    Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, Withdrawals,
 };
 use reth_rpc_types::{
     Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, WithOtherFields,
diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs
index 9b3ab75151..b72d66e49a 100644
--- a/crates/rpc/rpc-types-compat/src/engine/payload.rs
+++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs
@@ -1,10 +1,10 @@
 //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in
 //! Ethereum's Engine
 
+use alloy_primitives::{B256, U256};
 use reth_primitives::{
     constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE},
-    proofs::{self},
-    Block, Header, Request, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256,
+    proofs, Block, Header, Request, SealedBlock, TransactionSigned, Withdrawals,
 };
 use reth_rpc_types::engine::{
     payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2},
@@ -25,7 +25,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result, _>>()?;
     let transactions_root = proofs::calculate_transaction_root(&transactions);
@@ -50,7 +50,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result, Eth::Error>>()?
@@ -286,7 +286,7 @@ where
         .into_iter()
         .map(|tx| {
             tx.into_ecrecovered_unchecked()
-                .ok_or_else(|| EthApiError::InvalidTransactionSignature)
+                .ok_or(EthApiError::InvalidTransactionSignature)
                 .map_err(Eth::Error::from_eth_err)
         })
         .collect::<Result<Vec<_>, Eth::Error>>()?
@@ -306,14 +306,14 @@ where
         .provider
         .block_hash_for_id(block_id)
         .map_err(Eth::Error::from_eth_err)?
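Editor's note: a pattern that recurs throughout this diff replaces `ok_or_else(|| ...)` with plain `ok_or(...)` wherever the error value is cheap to construct, as in the hunks just above and below. A small standalone illustration of the distinction (hypothetical values, not from the diff):

```rust
fn main() {
    let header: Option<u64> = None;

    // `ok_or` evaluates its argument eagerly; that is fine for fieldless enum
    // variants such as `EthApiError::InvalidTransactionSignature`.
    let _: Result<u64, &str> = header.ok_or("invalid transaction signature");

    // `ok_or_else` defers construction behind a closure; prefer it when
    // building the error allocates or does non-trivial work.
    let _: Result<u64, String> =
        header.ok_or_else(|| format!("header not found for block {}", 100));
}
```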
- .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(block_id))?; let ((cfg, block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(block_hash.into()), self.inner.eth_api.block_with_senders(block_id), )?; - let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; // we need to get the state of the parent block because we're replaying this block on top of // its parent block's state let state_at = block.parent_hash; @@ -449,7 +449,7 @@ where Ok(inspector) }) .await?; - return Ok(FourByteFrame::from(inspector).into()) + return Ok(FourByteFrame::from(&inspector).into()) } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config @@ -495,7 +495,7 @@ where let frame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, db) + .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; Ok(frame) }) @@ -529,6 +529,11 @@ where .await?; return Ok(frame) } + GethDebugBuiltInTracerType::FlatCallTracer => { + return Err( + EthApiError::Unsupported("Flatcall tracer is not supported yet").into() + ) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -610,7 +615,7 @@ where )?; let opts = opts.unwrap_or_default(); - let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?; let GethDebugTracingCallOptions { tracing_options, mut state_overrides, .. } = opts; let gas_limit = self.inner.eth_api.call_gas_limit(); @@ -709,12 +714,13 @@ where pub async fn debug_execution_witness( &self, block_id: BlockNumberOrTag, - ) -> Result, Eth::Error> { + include_preimages: bool, + ) -> Result { let ((cfg, block_env, _), maybe_block) = futures::try_join!( self.inner.eth_api.evm_env_at(block_id.into()), self.inner.eth_api.block_with_senders(block_id.into()), )?; - let block = maybe_block.ok_or(EthApiError::UnknownBlockNumber)?; + let block = maybe_block.ok_or(EthApiError::HeaderNotFound(block_id.into()))?; let this = self.clone(); @@ -722,10 +728,8 @@ where .eth_api .spawn_with_state_at_block(block.parent_hash.into(), move |state| { let evm_config = Call::evm_config(this.eth_api()).clone(); - let mut db = StateBuilder::new() - .with_database(StateProviderDatabase::new(state)) - .with_bundle_update() - .build(); + let mut db = + StateBuilder::new().with_database(StateProviderDatabase::new(state)).build(); pre_block_beacon_root_contract_call( &mut db, @@ -764,17 +768,18 @@ where db.commit(res.state); } - // Merge all state transitions - db.merge_transitions(BundleRetention::Reverts); + // No need to merge transitions and create the bundle state, we will use Revm's + // cache directly. - // Take the bundle state - let bundle_state = db.take_bundle(); + // Initialize a map of preimages. + let mut state_preimages = HashMap::new(); // Grab all account proofs for the data accessed during block execution. // // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes - // referenced accounts + storage slots. - let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + // referenced accounts + storage slots. Cache is a superset of `BundleState`, so we + // can just query it to get the latest state of all accounts and storage slots. 
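Editor's note: the preimage bookkeeping introduced in this hunk boils down to keying the witness by keccak hashes of addresses and storage slots while optionally recording their RLP encodings, so that consumers can invert the hashing when `include_preimages` is set. A minimal standalone sketch, assuming `alloy-primitives` (with the `rlp` feature) and `alloy-rlp`; the hunk resumes right below.

```rust
use alloy_primitives::{keccak256, Address, B256};
use std::collections::HashMap;

fn main() {
    let address = Address::ZERO;
    let slot = B256::with_last_byte(1);

    // Witness keys are keccak hashes of the address and of each storage slot.
    let hashed_address = keccak256(address);
    let hashed_slot = keccak256(slot);

    // The RLP encoding of the original address/slot is stored next to its
    // hash so callers can map hashed keys back to plain ones.
    let mut preimages: HashMap<B256, Vec<u8>> = HashMap::new();
    preimages.insert(hashed_address, alloy_rlp::encode(address));
    preimages.insert(hashed_slot, alloy_rlp::encode(slot));

    assert_eq!(preimages.len(), 2);
}
```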
+ let mut hashed_state = HashedPostState::default(); for (address, account) in db.cache.accounts { let hashed_address = keccak256(address); hashed_state.accounts.insert( @@ -788,9 +793,19 @@ where .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); if let Some(account) = account.account { + if include_preimages { + state_preimages + .insert(hashed_address, alloy_rlp::encode(address).into()); + } + for (slot, value) in account.storage { - let hashed_slot = keccak256(B256::from(slot)); + let slot = B256::from(slot); + let hashed_slot = keccak256(slot); storage.storage.insert(hashed_slot, value); + + if include_preimages { + state_preimages.insert(hashed_slot, alloy_rlp::encode(slot).into()); + } } } } @@ -798,10 +813,10 @@ where // Generate an execution witness for the aggregated state of accessed accounts. // Destruct the cache database to retrieve the state provider. let state_provider = db.database.into_inner(); - let witness = state_provider - .witness(HashedPostState::default(), hashed_state) - .map_err(Into::into)?; - Ok(witness) + let state = + state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; + + Ok(ExecutionWitness { state, keys: include_preimages.then_some(state_preimages) }) }) .await } @@ -829,7 +844,7 @@ where GethDebugBuiltInTracerType::FourByteTracer => { let mut inspector = FourByteInspector::default(); let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; - return Ok((FourByteFrame::from(inspector).into(), res.state)) + return Ok((FourByteFrame::from(&inspector).into(), res.state)) } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config @@ -862,7 +877,7 @@ where let frame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, db) + .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) @@ -884,6 +899,11 @@ where .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } + GethDebugBuiltInTracerType::FlatCallTracer => { + return Err( + EthApiError::Unsupported("Flatcall tracer is not supported yet").into() + ) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -967,7 +987,7 @@ where .provider .block_by_id(block_id) .to_rpc_result()? 
- .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(block_id))?; let mut res = Vec::new(); block.encode(&mut res); Ok(res.into()) @@ -1073,20 +1093,21 @@ where async fn debug_execution_witness( &self, block: BlockNumberOrTag, - ) -> RpcResult> { + include_preimages: bool, + ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Self::debug_execution_witness(self, block).await.map_err(Into::into) + Self::debug_execution_witness(self, block, include_preimages).await.map_err(Into::into) } /// Handler for `debug_traceCall` async fn debug_trace_call( &self, request: TransactionRequest, - block_number: Option, + block_id: Option, opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()) + Self::debug_trace_call(self, request, block_id, opts.unwrap_or_default()) .await .map_err(Into::into) } diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index d218342137..53df19ff9c 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,10 +1,11 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::Network; +use alloy_primitives::{Address, Bytes, B256, U256, U64}; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; -use reth_rpc_eth_api::{EthApiTypes, RpcBlock, RpcTransaction}; +use reth_rpc_eth_api::{EthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; use reth_rpc_types::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, JsonStorageKey, Log, SyncStatus, TransactionRequest, WithOtherFields, @@ -36,8 +37,11 @@ impl EngineEthApi { impl EngineEthApiServer> for EngineEthApi where - Eth: EthApiServer, RpcBlock> - + EthApiTypes< + Eth: EthApiServer< + RpcTransaction, + RpcBlock, + RpcReceipt, + > + EthApiTypes< NetworkTypes: Network< TransactionResponse = WithOtherFields, >, @@ -69,19 +73,19 @@ where async fn call( &self, request: TransactionRequest, - block_number: Option, + block_id: Option, state_overrides: Option, block_overrides: Option>, ) -> Result { self.eth - .call(request, block_number, state_overrides, block_overrides) + .call(request, block_id, state_overrides, block_overrides) .instrument(engine_span!()) .await } /// Handler for: `eth_getCode` - async fn get_code(&self, address: Address, block_number: Option) -> Result { - self.eth.get_code(address, block_number).instrument(engine_span!()).await + async fn get_code(&self, address: Address, block_id: Option) -> Result { + self.eth.get_code(address, block_id).instrument(engine_span!()).await } /// Handler for: `eth_getBlockByHash` diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 9cabc1f6f5..484b8dfe86 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,12 +2,12 @@ use std::sync::Arc; +use alloy_primitives::{keccak256, U256}; use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ - keccak256, revm_primitives::db::{DatabaseCommit, DatabaseRef}, - PooledTransactionsElement, U256, + PooledTransactionsElement, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; @@ -87,7 +87,7 @@ where .iter() .filter_map(|(tx, _)| { if let 
PooledTransactionsElement::BlobTransaction(tx) = tx { - Some(tx.transaction.blob_gas()) + Some(tx.transaction.tx.blob_gas()) } else { None } @@ -128,7 +128,7 @@ where let parent = LoadPendingBlock::provider(&self.inner.eth_api) .header_by_number(parent_block) .map_err(Eth::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(parent_block.into()))?; if let Some(base_fee) = parent.next_block_base_fee( LoadPendingBlock::provider(&self.inner.eth_api) .chain_spec() diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index c79007fe3d..73b10b05ec 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -4,9 +4,10 @@ use std::sync::Arc; use alloy_network::AnyNetwork; +use alloy_primitives::U256; use derive_more::Deref; use reth_node_api::{BuilderProvider, FullNodeComponents}; -use reth_primitives::{BlockNumberOrTag, U256}; +use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, @@ -56,6 +57,7 @@ where eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, + max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, fee_history_cache: FeeHistoryCache, @@ -69,6 +71,7 @@ where eth_cache, gas_oracle, gas_cap, + max_simulate_blocks, eth_proof_window, blocking_task_pool, fee_history_cache, @@ -106,6 +109,7 @@ where ctx.cache.clone(), ctx.new_gas_price_oracle(), ctx.config.rpc_gas_cap, + ctx.config.rpc_max_simulate_blocks, ctx.config.eth_proof_window, blocking_task_pool, ctx.new_fee_history_cache(), @@ -185,6 +189,8 @@ pub struct EthApiInner { gas_oracle: GasPriceOracle, /// Maximum gas limit for `eth_call` and call tracing RPC methods. gas_cap: u64, + /// Maximum number of blocks for `eth_simulateV1`. + max_simulate_blocks: u64, /// The maximum number of blocks into the past for generating state proofs. eth_proof_window: u64, /// The block number at which the node started @@ -217,6 +223,7 @@ where eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, + max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, fee_history_cache: FeeHistoryCache, @@ -243,6 +250,7 @@ where eth_cache, gas_oracle, gas_cap: gas_cap.into().into(), + max_simulate_blocks, eth_proof_window, starting_block, task_spawner: Box::new(task_spawner), @@ -304,6 +312,12 @@ impl EthApiInner u64 { + self.max_simulate_blocks + } + /// Returns a handle to the gas oracle. 
#[inline] pub const fn gas_oracle(&self) -> &GasPriceOracle { @@ -349,11 +363,12 @@ impl EthApiInner( provider: P, ) -> EthApi { - let evm_config = EthEvmConfig::default(); - let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config); + let evm_config = EthEvmConfig::new(provider.chain_spec()); + let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config.clone()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); @@ -395,6 +412,7 @@ mod tests { cache.clone(), GasPriceOracle::new(provider, Default::default(), cache), gas_cap, + DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), fee_history_cache, @@ -490,7 +508,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _>>::fee_history( + let response = as EthApiServer<_, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -512,7 +530,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _>>::fee_history( + let response = as EthApiServer<_, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -535,7 +553,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _>>::fee_history( + let response = as EthApiServer<_, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -558,7 +576,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _>>::fee_history( + let response = as EthApiServer<_, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index c75b5cd093..8f905929b7 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -9,10 +9,11 @@ use std::{ time::{Duration, Instant}, }; +use alloy_primitives::TxHash; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::{IntoRecoveredTransaction, TransactionSignedEcRecovered, TxHash}; +use reth_primitives::{IntoRecoveredTransaction, TransactionSignedEcRecovered}; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; use reth_rpc_eth_api::EthFilterApiServer; use reth_rpc_eth_types::{ @@ -166,11 +167,10 @@ where // Note: we need to fetch the block hashes from inclusive range // [start_block..best_block] let end_block = best_number + 1; - let block_hashes = self - .inner - .provider - .canonical_hashes_range(start_block, end_block) - .map_err(|_| EthApiError::UnknownBlockNumber)?; + let block_hashes = + self.inner.provider.canonical_hashes_range(start_block, end_block).map_err( + |_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()), + )?; Ok(FilterChanges::Hashes(block_hashes)) } FilterKind::Log(filter) => { @@ -369,7 +369,7 @@ where .eth_cache .get_receipts(block_hash) .await? 
- .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; let mut all_logs = Vec::new(); let filter = FilteredParams::new(Some(filter)); diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 92f8d5e034..5e16ce64d8 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,8 +1,13 @@ //! Contains RPC handler implementations specific to blocks. +use reth_primitives::{BlockId, TransactionMeta}; use reth_provider::{BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_api::helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}; -use reth_rpc_eth_types::EthStateCache; +use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, + FromEthApiError, +}; +use reth_rpc_eth_types::{EthStateCache, ReceiptBuilder}; +use reth_rpc_types::AnyTransactionReceipt; use crate::EthApi; @@ -15,6 +20,48 @@ where fn provider(&self) -> impl HeaderProvider { self.inner.provider() } + + async fn block_receipts( + &self, + block_id: BlockId, + ) -> Result>, Self::Error> + where + Self: LoadReceipt, + { + if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { + let block_number = block.number; + let base_fee = block.base_fee_per_gas; + let block_hash = block.hash(); + let excess_blob_gas = block.excess_blob_gas; + let timestamp = block.timestamp; + let block = block.unseal(); + + let receipts = block + .body + .into_iter() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (tx, receipt))| { + let meta = TransactionMeta { + tx_hash: tx.hash, + index: idx as u64, + block_hash, + block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + + ReceiptBuilder::new(&tx, meta, receipt, &receipts) + .map(|builder| builder.build()) + .map_err(Self::Error::from_eth_err) + }) + .collect::, Self::Error>>(); + return receipts.map(Some) + } + + Ok(None) + } } impl LoadBlock for EthApi diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index c442c46b4b..396bf9bd08 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. use reth_evm::ConfigureEvm; +use reth_primitives::Header; use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; use crate::EthApi; @@ -13,7 +14,7 @@ impl EthCall for EthApi Call for EthApi where Self: LoadState + SpawnBlocking, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>,
 {
     #[inline]
     fn call_gas_limit(&self) -> u64 {
@@ -21,7 +22,12 @@ where
     }
 
     #[inline]
-    fn evm_config(&self) -> &impl ConfigureEvm {
+    fn max_simulate_blocks(&self) -> u64 {
+        self.inner.max_simulate_blocks()
+    }
+
+    #[inline]
+    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header> {
         self.inner.evm_config()
     }
 }
diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs
index 0660a92da7..04775a718b 100644
--- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs
+++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs
@@ -2,6 +2,7 @@
 use reth_chainspec::ChainSpec;
 use reth_evm::ConfigureEvm;
+use reth_primitives::Header;
 use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory};
 use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking};
 use reth_rpc_eth_types::PendingBlock;
@@ -18,7 +19,7 @@ where
         + ChainSpecProvider
         + StateProviderFactory,
     Pool: TransactionPool,
-    EvmConfig: ConfigureEvm,
+    EvmConfig: ConfigureEvm<Header = Header>,
 {
     #[inline]
     fn provider(
@@ -41,7 +42,7 @@ where
     }
 
     #[inline]
-    fn evm_config(&self) -> &impl ConfigureEvm {
+    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ self.inner.evm_config() } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index db1fee781f..77a4058240 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,7 +1,9 @@ //! Builds an RPC receipt response w.r.t. data layout of network. -use reth_rpc_eth_api::helpers::LoadReceipt; -use reth_rpc_eth_types::EthStateCache; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError}; +use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_types::AnyTransactionReceipt; use crate::EthApi; @@ -13,4 +15,22 @@ where fn cache(&self) -> &EthStateCache { self.inner.cache() } + + async fn build_transaction_receipt( + &self, + tx: TransactionSigned, + meta: TransactionMeta, + receipt: Receipt, + ) -> Result { + let hash = meta.block_hash; + // get all receipts for the block + let all_receipts = self + .cache() + .get_receipts(hash) + .await + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(hash.into()))?; + + Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index d7e075d332..960c3b6ca2 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -3,9 +3,8 @@ use std::collections::HashMap; use alloy_dyn_abi::TypedData; -use reth_primitives::{ - eip191_hash_message, sign_message, Address, Signature, TransactionSigned, B256, -}; +use alloy_primitives::{eip191_hash_message, Address, B256}; +use reth_primitives::{sign_message, Signature, TransactionSigned}; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; use reth_rpc_types::TypedTransactionRequest; @@ -104,7 +103,7 @@ impl EthSigner for DevSigner { mod tests { use std::str::FromStr; - use reth_primitives::U256; + use alloy_primitives::U256; use super::*; diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 202587a3e8..1f42f09d77 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,6 +1,6 @@ +use alloy_primitives::U256; use reth_chainspec::ChainSpec; use reth_network_api::NetworkInfo; -use reth_primitives::U256; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; use reth_rpc_eth_api::helpers::EthApiSpec; use reth_transaction_pool::TransactionPool; diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 059db3fe59..c0696adc53 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -43,25 +43,28 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{Address, StorageKey, StorageValue, U256}; + use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; - use reth_primitives::{ - constants::ETHEREUM_BLOCK_GAS_LIMIT, Address, StorageKey, StorageValue, KECCAK_EMPTY, U256, - }; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, KECCAK_EMPTY}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, }; - use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use 
reth_rpc_server_types::constants::{ + DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, + }; use reth_tasks::pool::BlockingTaskPool; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; fn noop_eth_api() -> EthApi { let pool = testing_pool(); - let evm_config = EthEvmConfig::default(); + let evm_config = EthEvmConfig::new(MAINNET.clone()); - let cache = EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config); + let cache = + EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config.clone()); EthApi::new( NoopProvider::default(), pool, @@ -69,6 +72,7 @@ mod tests { cache.clone(), GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), @@ -81,12 +85,13 @@ mod tests { accounts: HashMap, ) -> EthApi { let pool = testing_pool(); - let evm_config = EthEvmConfig::default(); - let mock_provider = MockEthProvider::default(); + + let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config); + let cache = + EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config.clone()); EthApi::new( mock_provider.clone(), pool, @@ -94,6 +99,7 @@ mod tests { cache.clone(), GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index fe1ee9f13c..c40b7acf50 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations specific to tracing. use reth_evm::ConfigureEvm; +use reth_primitives::Header; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; @@ -8,10 +9,10 @@ use crate::EthApi; impl Trace for EthApi where Self: LoadState, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm
<Header = Header>,
 {
     #[inline]
-    fn evm_config(&self) -> &impl ConfigureEvm {
+    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ self.inner.evm_config() } } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 42af336811..17eff7b357 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -52,15 +52,19 @@ where #[cfg(test)] mod tests { + use alloy_primitives::{hex_literal::hex, Bytes}; + use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex, Bytes}; + use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, }; - use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_rpc_server_types::constants::{ + DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, + }; use reth_tasks::pool::BlockingTaskPool; use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; @@ -73,8 +77,8 @@ mod tests { let pool = testing_pool(); - let evm_config = EthEvmConfig::default(); - let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config); + let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); + let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config.clone()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); let eth_api = EthApi::new( @@ -84,6 +88,7 @@ mod tests { cache.clone(), GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), fee_history_cache, diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index d84f0f263a..64fe17e171 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,12 +2,13 @@ use std::sync::Arc; +use alloy_primitives::TxHash; use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; -use reth_primitives::{IntoRecoveredTransaction, TxHash}; +use reth_primitives::IntoRecoveredTransaction; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_eth_api::pubsub::EthPubSubApiServer; use reth_rpc_eth_types::logs_utils; diff --git a/crates/rpc/rpc/src/net.rs b/crates/rpc/rpc/src/net.rs index 2900f84896..75e9462e66 100644 --- a/crates/rpc/rpc/src/net.rs +++ b/crates/rpc/rpc/src/net.rs @@ -1,6 +1,6 @@ +use alloy_primitives::U64; use jsonrpsee::core::RpcResult as Result; use reth_network_api::PeersInfo; -use reth_primitives::U64; use reth_rpc_api::NetApiServer; use reth_rpc_eth_api::helpers::EthApiSpec; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 06b95c2e14..94bc8c2d13 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,10 +1,13 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::Network; -use alloy_primitives::Bytes; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; -use reth_primitives::{Address, BlockNumberOrTag, TxHash, B256, U256}; use 
reth_rpc_api::{EthApiServer, OtterscanServer}; -use reth_rpc_eth_api::{helpers::TraceExt, EthApiTypes, RpcBlock, RpcTransaction}; +use reth_rpc_eth_api::{ + helpers::{EthTransactions, TraceExt}, + EthApiTypes, RpcBlock, RpcReceipt, RpcTransaction, +}; use reth_rpc_eth_types::{utils::binary_search, EthApiError}; use reth_rpc_server_types::result::internal_rpc_err; use reth_rpc_types::{ @@ -41,18 +44,18 @@ impl OtterscanApi { impl OtterscanApi where Eth: EthApiTypes< - NetworkTypes: Network>, + NetworkTypes: Network< + TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, + >, >, { /// Constructs a `BlockDetails` from a block and its receipts. fn block_details( &self, - block: Option>, - receipts: Option>, + block: RpcBlock, + receipts: Vec>, ) -> RpcResult { - let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let receipts = receipts.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - // blob fee is burnt, so we don't need to calculate it let total_fees = receipts .iter() @@ -66,12 +69,17 @@ where #[async_trait] impl OtterscanServer for OtterscanApi where - Eth: EthApiServer, RpcBlock> - + EthApiTypes< + Eth: EthApiServer< + RpcTransaction, + RpcBlock, + RpcReceipt, + > + EthApiTypes< NetworkTypes: Network< TransactionResponse = WithOtherFields, + ReceiptResponse = AnyTransactionReceipt, >, > + TraceExt + + EthTransactions + 'static, { /// Handler for `{ots,erigon}_getHeaderByNumber` @@ -80,8 +88,8 @@ where } /// Handler for `ots_hasCode` - async fn has_code(&self, address: Address, block_number: Option) -> RpcResult { - self.eth.get_code(address, block_number.map(Into::into)).await.map(|code| !code.is_empty()) + async fn has_code(&self, address: Address, block_id: Option) -> RpcResult { + EthApiServer::get_code(&self.eth, address, block_id).await.map(|code| !code.is_empty()) } /// Handler for `ots_getApiLevel` @@ -109,7 +117,9 @@ where value: op.value, r#type: match op.kind { TransferKind::Call => OperationType::OpTransfer, - TransferKind::Create => OperationType::OpCreate, + TransferKind::Create | TransferKind::EofCreate => { + OperationType::OpCreate + } TransferKind::Create2 => OperationType::OpCreate2, TransferKind::SelfDestruct => OperationType::OpSelfDestruct, }, @@ -168,18 +178,27 @@ where /// Handler for `ots_getBlockDetails` async fn get_block_details(&self, block_number: u64) -> RpcResult { - let block = self.eth.block_by_number(block_number.into(), true); - let receipts = self.eth.block_receipts(block_number.into()); + let block_id = block_number.into(); + let block = self.eth.block_by_number(block_id, true); + let block_id = block_id.into(); + let receipts = self.eth.block_receipts(block_id); let (block, receipts) = futures::try_join!(block, receipts)?; - self.block_details(block, receipts) + self.block_details( + block.ok_or(EthApiError::HeaderNotFound(block_id))?, + receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?, + ) } /// Handler for `getBlockDetailsByHash` async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult { let block = self.eth.block_by_hash(block_hash, true); - let receipts = self.eth.block_receipts(block_hash.into()); + let block_id = block_hash.into(); + let receipts = self.eth.block_receipts(block_id); let (block, receipts) = futures::try_join!(block, receipts)?; - self.block_details(block, receipts) + self.block_details( + block.ok_or(EthApiError::HeaderNotFound(block_id))?, + receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?, + ) } /// Handler for `getBlockTransactions` @@ -189,13 
+208,15 @@ where page_number: usize, page_size: usize, ) -> RpcResult>> { + let block_id = block_number.into(); // retrieve full block and its receipts - let block = self.eth.block_by_number(block_number.into(), true); - let receipts = self.eth.block_receipts(block_number.into()); + let block = self.eth.block_by_number(block_id, true); + let block_id = block_id.into(); + let receipts = self.eth.block_receipts(block_id); let (block, receipts) = futures::try_join!(block, receipts)?; - let mut block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let mut receipts = receipts.ok_or_else(|| internal_rpc_err("receipts not found"))?; + let mut block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; + let mut receipts = receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?; // check if the number of transactions matches the number of receipts let tx_len = block.transactions.len(); @@ -279,51 +300,11 @@ where sender: Address, nonce: u64, ) -> RpcResult> { - // Check if the sender is a contract - if self.has_code(sender, None).await? { - return Ok(None) - } - - let highest = - EthApiServer::transaction_count(&self.eth, sender, None).await?.saturating_to::(); - - // If the nonce is higher or equal to the highest nonce, the transaction is pending or not - // exists. - if nonce >= highest { - return Ok(None) - } - - // perform a binary search over the block range to find the block in which the sender's - // nonce reached the requested nonce. - let num = binary_search::<_, _, ErrorObjectOwned>( - 1, - self.eth.block_number()?.saturating_to(), - |mid| { - async move { - let mid_nonce = - EthApiServer::transaction_count(&self.eth, sender, Some(mid.into())) - .await? - .saturating_to::(); - - // The `transaction_count` returns the `nonce` after the transaction was - // executed, which is the state of the account after the block, and we need to - // find the transaction whose nonce is the pre-state, so - // need to compare with `nonce`(no equal). - Ok(mid_nonce > nonce) - } - }, - ) - .await?; - - let Some(BlockTransactions::Full(transactions)) = - self.eth.block_by_number(num.into(), true).await?.map(|block| block.transactions) - else { - return Err(EthApiError::UnknownBlockNumber.into()); - }; - - Ok(transactions - .into_iter() - .find(|tx| *tx.from == *sender && tx.nonce == nonce) + Ok(self + .eth + .get_transaction_by_sender_and_nonce(sender, nonce, false) + .await + .map_err(Into::into)? .map(|tx| tx.hash)) } @@ -338,7 +319,9 @@ where self.eth.block_number()?.saturating_to(), |mid| { Box::pin(async move { - Ok(!self.eth.get_code(address, Some(mid.into())).await?.is_empty()) + Ok(!EthApiServer::get_code(&self.eth, address, Some(mid.into())) + .await? 
+ .is_empty()) }) }, ) @@ -371,7 +354,7 @@ where ) if contract == address => Some(ContractCreator { hash: tx_trace .transaction_hash - .ok_or_else(|| EthApiError::TransactionNotFound)?, + .ok_or(EthApiError::TransactionNotFound)?, creator, }), _ => None, diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 33dc749204..3e8e842aed 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,9 +1,10 @@ use std::{collections::HashMap, future::Future, sync::Arc}; +use alloy_primitives::{Address, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; -use reth_primitives::{Address, BlockId, U256}; +use reth_primitives::BlockId; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; use reth_rpc_eth_types::{EthApiError, EthResult}; @@ -64,7 +65,7 @@ where fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult> { let Some(block_number) = self.provider().block_number_for_id(block_id)? else { - return Err(EthApiError::UnknownBlockNumber) + return Err(EthApiError::HeaderNotFound(block_id)) }; let state = self.provider().state_by_block_id(block_id)?; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index cb9f0ab269..83e41d9a92 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,5 +1,6 @@ use std::{collections::HashSet, sync::Arc}; +use alloy_primitives::{Bytes, B256, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpec, EthereumHardforks}; @@ -7,7 +8,7 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Bytes, Header, B256, U256}; +use reth_primitives::{BlockId, Header}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; @@ -213,7 +214,7 @@ where }) .await .transpose() - .ok_or_else(|| EthApiError::TransactionNotFound)? + .ok_or(EthApiError::TransactionNotFound)? 
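Editor's note: both the removed Otterscan nonce lookup and the retained `ots_getContractCreator` path above lean on a monotonic binary search over block numbers: the predicate flips from false to true exactly once, and the search returns the first block where it holds. A standalone sketch of that invariant (a hypothetical, simplified stand-in for `reth_rpc_eth_types::utils::binary_search`); the `trace.rs` hunk closes just below.

```rust
/// Finds the smallest value in [low, high] for which `pred` is true,
/// assuming `pred` is monotone (false ... false, true ... true).
fn binary_search(mut low: u64, mut high: u64, pred: impl Fn(u64) -> bool) -> u64 {
    while low < high {
        let mid = low + (high - low) / 2;
        if pred(mid) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    low
}

fn main() {
    // Toy "nonce after block" function: non-decreasing in the block number.
    let nonce_after_block = |b: u64| -> u64 { b / 2 };
    // First block whose post-state nonce exceeds 5 (note the strict `>`,
    // matching the removed comment about comparing with no equality).
    let first = binary_search(1, 100, |mid| nonce_after_block(mid) > 5);
    assert_eq!(first, 12); // nonce_after_block(12) == 6 > 5
}
```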
} /// Returns transaction trace objects at the given index diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 028cb288a6..db63167e39 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,6 +1,7 @@ +use alloy_primitives::Address; use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{Address, TransactionSignedEcRecovered}; +use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types::{ txpool::{TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus}, diff --git a/crates/rpc/rpc/src/web3.rs b/crates/rpc/rpc/src/web3.rs index 787604e25e..8a890efbe7 100644 --- a/crates/rpc/rpc/src/web3.rs +++ b/crates/rpc/rpc/src/web3.rs @@ -1,7 +1,7 @@ +use alloy_primitives::{keccak256, Bytes, B256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::NetworkInfo; -use reth_primitives::{keccak256, Bytes, B256}; use reth_rpc_api::Web3ApiServer; use reth_rpc_server_types::ToRpcResult; diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index a5db5b9fb2..8b74b8c5ae 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -23,6 +23,7 @@ reth-prune.workspace = true reth-errors.workspace = true reth-stages-types.workspace = true reth-static-file-types.workspace = true +reth-node-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index e37eaa3d72..aba001a92f 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -1,6 +1,5 @@ use crate::{metrics::SyncMetrics, StageCheckpoint, StageId}; use alloy_primitives::BlockNumber; -use reth_primitives_traits::constants::MEGAGAS; use std::{ future::Future, pin::Pin, @@ -30,11 +29,6 @@ pub enum MetricEvent { /// If specified, `entities_total` metric is updated. max_block_number: Option, }, - /// Execution stage processed some amount of gas. - ExecutionStageGas { - /// Gas processed. - gas: u64, - }, } /// Metrics routine that listens to new metric events on the `events_rx` receiver. @@ -82,9 +76,6 @@ impl MetricsListener { stage_metrics.entities_total.set(total as f64); } } - MetricEvent::ExecutionStageGas { gas } => { - self.sync_metrics.execution_stage.mgas_processed_total.increment(gas / MEGAGAS) - } } } } diff --git a/crates/stages/api/src/metrics/sync_metrics.rs b/crates/stages/api/src/metrics/sync_metrics.rs index 3ee2964ea7..b89d7b8822 100644 --- a/crates/stages/api/src/metrics/sync_metrics.rs +++ b/crates/stages/api/src/metrics/sync_metrics.rs @@ -1,14 +1,10 @@ use crate::StageId; -use reth_metrics::{ - metrics::{Counter, Gauge}, - Metrics, -}; +use reth_metrics::{metrics::Gauge, Metrics}; use std::collections::HashMap; #[derive(Debug, Default)] pub(crate) struct SyncMetrics { pub(crate) stages: HashMap, - pub(crate) execution_stage: ExecutionStageMetrics, } impl SyncMetrics { @@ -31,11 +27,3 @@ pub(crate) struct StageMetrics { /// The number of total entities of the last commit for a stage, if applicable. pub(crate) entities_total: Gauge, } - -/// Execution stage metrics. 
-#[derive(Metrics)] -#[metrics(scope = "sync.execution")] -pub(crate) struct ExecutionStageMetrics { - /// The total amount of gas processed (in millions) - pub(crate) mgas_processed_total: Counter, -} diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 1e83af4c3c..8493504939 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -1,6 +1,7 @@ use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet}; use alloy_primitives::{BlockNumber, B256}; use reth_db_api::database::Database; +use reth_node_types::NodeTypesWithDB; use reth_provider::ProviderFactory; use reth_static_file::StaticFileProducer; use tokio::sync::watch; @@ -68,11 +69,11 @@ where } /// Builds the final [`Pipeline`] using the given database. - pub fn build( + pub fn build>( self, - provider_factory: ProviderFactory, - static_file_producer: StaticFileProducer, - ) -> Pipeline { + provider_factory: ProviderFactory, + static_file_producer: StaticFileProducer>, + ) -> Pipeline { let Self { stages, max_block, tip_tx, metrics_tx } = self; Pipeline { provider_factory, diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index b2cdbfa1ac..a94112396a 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -5,11 +5,12 @@ use crate::{PipelineTarget, StageCheckpoint, StageId}; use alloy_primitives::{BlockNumber, B256}; pub use event::*; use futures_util::Future; -use reth_db_api::database::Database; +use reth_node_types::NodeTypesWithDB; use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ - writer::UnifiedStorageWriter, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, - StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, FinalizedBlockReader, + FinalizedBlockWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, + StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -36,10 +37,10 @@ pub(crate) type BoxedStage = Box>; /// The future that returns the owned pipeline and the result of the pipeline run. See /// [`Pipeline::run_as_fut`]. -pub type PipelineFut = Pin> + Send>>; +pub type PipelineFut = Pin> + Send>>; /// The pipeline type itself with the result of [`Pipeline::run_as_fut`] -pub type PipelineWithResult = (Pipeline, Result); +pub type PipelineWithResult = (Pipeline, Result); #[cfg_attr(doc, aquamarine::aquamarine)] /// A staged sync pipeline. @@ -63,14 +64,14 @@ pub type PipelineWithResult = (Pipeline, Result { +pub struct Pipeline { /// Provider factory. - provider_factory: ProviderFactory, + provider_factory: ProviderFactory, /// All configured stages in the order they will be executed. - stages: Vec>, + stages: Vec>, /// The maximum block number to sync to. max_block: Option, - static_file_producer: StaticFileProducer, + static_file_producer: StaticFileProducer>, /// Sender for events the pipeline emits. event_sender: EventSender, /// Keeps track of the progress of the pipeline. @@ -80,12 +81,9 @@ pub struct Pipeline { metrics_tx: Option, } -impl Pipeline -where - DB: Database + 'static, -{ +impl Pipeline { /// Construct a pipeline using a [`PipelineBuilder`]. 
-    pub fn builder() -> PipelineBuilder<DB> {
+    pub fn builder() -> PipelineBuilder<N::DB> {
         PipelineBuilder::default()
     }
 
@@ -107,7 +105,9 @@
     pub fn events(&self) -> EventStream<PipelineEvent> {
         self.event_sender.new_listener()
     }
+}
 
+impl<N: ProviderNodeTypes> Pipeline<N> {
     /// Registers progress metrics for each registered stage
     pub fn register_metrics(&mut self) -> Result<(), PipelineError> {
         let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) };
@@ -127,7 +127,7 @@
     /// Consume the pipeline and run it until it reaches the provided tip, if set. Return the
     /// pipeline and its result as a future.
     #[track_caller]
-    pub fn run_as_fut(mut self, target: Option<PipelineTarget>) -> PipelineFut<DB> {
+    pub fn run_as_fut(mut self, target: Option<PipelineTarget>) -> PipelineFut<N> {
         // TODO: fix this in a follow up PR. ideally, consensus engine would be responsible for
         // updating metrics.
         let _ = self.register_metrics(); // ignore error
@@ -487,8 +487,8 @@
     }
 }
 
-fn on_stage_error<DB: Database>(
-    factory: &ProviderFactory<DB>,
+fn on_stage_error<N: ProviderNodeTypes>(
+    factory: &ProviderFactory<N>,
     stage_id: StageId,
     prev_checkpoint: Option<StageCheckpoint>,
     err: StageError,
@@ -574,7 +574,7 @@
     }
 }
 
-impl<DB: Database> std::fmt::Debug for Pipeline<DB> {
+impl<N: NodeTypesWithDB> std::fmt::Debug for Pipeline<N> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("Pipeline")
             .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::<Vec<StageId>>())
@@ -591,7 +591,7 @@ mod tests {
     use assert_matches::assert_matches;
     use reth_consensus::ConsensusError;
     use reth_errors::ProviderError;
-    use reth_provider::test_utils::create_test_provider_factory;
+    use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
     use reth_prune::PruneModes;
     use reth_testing_utils::{generators, generators::random_header};
     use tokio_stream::StreamExt;
@@ -628,7 +628,7 @@ mod tests {
     async fn run_pipeline() {
         let provider_factory = create_test_provider_factory();
 
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(
                 TestStage::new(StageId::Other("A"))
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true })),
@@ -696,7 +696,7 @@ mod tests {
     async fn unwind_pipeline() {
         let provider_factory = create_test_provider_factory();
 
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(
                 TestStage::new(StageId::Other("A"))
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }))
@@ -830,7 +830,7 @@ mod tests {
     async fn unwind_pipeline_with_intermediate_progress() {
         let provider_factory = create_test_provider_factory();
 
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(
                 TestStage::new(StageId::Other("A"))
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }))
@@ -930,7 +930,7 @@ mod tests {
     async fn run_pipeline_with_unwind() {
         let provider_factory = create_test_provider_factory();
 
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(
                 TestStage::new(StageId::Other("A"))
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }))
@@ -1051,7 +1051,7 @@ mod tests {
     async fn pipeline_error_handling() {
         // Non-fatal
         let provider_factory = create_test_provider_factory();
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(
                 TestStage::new(StageId::Other("NonFatal"))
                     .add_exec(Err(StageError::Recoverable(Box::new(std::fmt::Error))))
@@ -1067,7 +1067,7 @@ mod tests {
         // Fatal
         let provider_factory = create_test_provider_factory();
-        let mut pipeline = Pipeline::builder()
+        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
             .add_stage(TestStage::new(StageId::Other("Fatal")).add_exec(Err(
                 StageError::DatabaseIntegrity(ProviderError::BlockBodyIndicesNotFound(5)),
             )))
diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs
index 9f2da8b0fc..0c3db14cb6 100644
--- a/crates/stages/stages/src/lib.rs
+++ b/crates/stages/stages/src/lib.rs
@@ -27,7 +27,7 @@
 //! # use reth_evm_ethereum::EthEvmConfig;
 //! # use reth_provider::ProviderFactory;
 //! # use reth_provider::StaticFileProviderFactory;
-//! # use reth_provider::test_utils::create_test_provider_factory;
+//! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
 //! # use reth_static_file::StaticFileProducer;
 //! # use reth_config::config::StageConfig;
 //! # use reth_consensus::Consensus;
@@ -53,7 +53,7 @@
 //! # );
 //! // Create a pipeline that can fully sync
 //! # let pipeline =
-//! Pipeline::builder()
+//! Pipeline::<MockNodeTypesWithDB>::builder()
 //!     .with_tip_sender(tip_tx)
 //!     .add_stages(DefaultStages::new(
 //!         provider_factory.clone(),
diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs
index 6048ed4f32..2c7ef4decd 100644
--- a/crates/stages/stages/src/sets.rs
+++ b/crates/stages/stages/src/sets.rs
@@ -16,7 +16,7 @@
 //! # use reth_prune_types::PruneModes;
 //! # use reth_evm_ethereum::EthEvmConfig;
 //! # use reth_provider::StaticFileProviderFactory;
-//! # use reth_provider::test_utils::create_test_provider_factory;
+//! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
 //! # use reth_static_file::StaticFileProducer;
 //! # use reth_config::config::StageConfig;
 //! # use reth_evm::execute::BlockExecutorProvider;
@@ -27,8 +27,8 @@
 //! let static_file_producer =
 //!     StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
 //! // Build a pipeline with all offline stages.
-//! let pipeline = Pipeline::builder()
-//!     .add_stages(OfflineStages::new(exec, StageConfig::default(), PruneModes::default(), false))
+//! let pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
+//!     .add_stages(OfflineStages::new(exec, StageConfig::default(), PruneModes::default()))
 //!     .build(provider_factory, static_file_producer);
 //!
 //! # }
diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs
index b95f3d0ddc..0b78fb959a 100644
--- a/crates/stages/stages/src/stages/bodies.rs
+++ b/crates/stages/stages/src/stages/bodies.rs
@@ -689,7 +689,7 @@ mod tests {
         },
     };
     use futures_util::Stream;
-    use reth_db::{static_file::HeaderMask, tables, test_utils::TempDatabase, DatabaseEnv};
+    use reth_db::{static_file::HeaderMask, tables};
     use reth_db_api::{
         cursor::DbCursorRO,
         models::{StoredBlockBodyIndices, StoredBlockOmmers},
@@ -707,8 +707,8 @@ mod tests {
         StaticFileSegment, TxNumber, B256,
     };
     use reth_provider::{
-        providers::StaticFileWriter, HeaderProvider, ProviderFactory,
-        StaticFileProviderFactory, TransactionsProvider,
+        providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider,
+        ProviderFactory, StaticFileProviderFactory, TransactionsProvider,
     };
     use reth_stages_api::{ExecInput, ExecOutput, UnwindInput};
     use reth_testing_utils::generators::{
@@ -718,7 +718,6 @@ mod tests {
         collections::{HashMap, VecDeque},
         ops::RangeInclusive,
         pin::Pin,
-        sync::Arc,
         task::{Context, Poll},
     };
 
@@ -979,7 +978,7 @@ mod tests {
     /// A [`BodyDownloader`] that is backed by an internal [`HashMap`] for testing.
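// --- Illustrative sketch, not part of this diff ------------------------------
// The refactor above replaces a bare `DB: Database` type parameter with a
// bundled node-type parameter that carries the database as an associated type,
// so `Pipeline<N>` can name `N::DB` instead of threading `DB` through every
// signature. `MiniNodeTypes`/`Db` below are simplified stand-ins for reth's
// `NodeTypesWithDB`/`Database` traits, not the real definitions.
trait Db: Send + Sync + 'static {}

trait MiniNodeTypes {
    type DB: Db;
}

struct MiniFactory<N: MiniNodeTypes> {
    db: N::DB,
}

trait MiniStage<DB: Db> {
    fn id(&self) -> &'static str;
}

struct MiniPipeline<N: MiniNodeTypes> {
    factory: MiniFactory<N>,
    // Stages only need the database type, not the whole node-type bundle.
    stages: Vec<Box<dyn MiniStage<N::DB>>>,
}
// ------------------------------------------------------------------------------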
#[derive(Debug)] pub(crate) struct TestBodyDownloader { - provider_factory: ProviderFactory>>, + provider_factory: ProviderFactory, responses: HashMap, headers: VecDeque, batch_size: u64, @@ -987,7 +986,7 @@ mod tests { impl TestBodyDownloader { pub(crate) fn new( - provider_factory: ProviderFactory>>, + provider_factory: ProviderFactory, responses: HashMap, batch_size: u64, ) -> Self { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index bc177a01b7..5324a37736 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -3,7 +3,10 @@ use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; -use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; +use reth_evm::{ + execute::{BatchExecutor, BlockExecutorProvider}, + metrics::ExecutorMetrics, +}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{BlockNumber, Header, StaticFileSegment}; @@ -18,8 +21,8 @@ use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, - ExecutionCheckpoint, ExecutionStageThresholds, MetricEvent, MetricEventsSender, Stage, - StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, + ExecutionCheckpoint, ExecutionStageThresholds, Stage, StageCheckpoint, StageError, StageId, + UnwindInput, UnwindOutput, }; use std::{ cmp::Ordering, @@ -61,7 +64,6 @@ use tracing::*; // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] pub struct ExecutionStage { - metrics_tx: Option, /// The stage's internal block executor executor_provider: E, /// The commit thresholds of the execution stage. @@ -83,11 +85,13 @@ pub struct ExecutionStage { post_unwind_commit_input: Option, /// Handle to communicate with `ExEx` manager. exex_manager_handle: ExExManagerHandle, + /// Executor metrics. + metrics: ExecutorMetrics, } impl ExecutionStage { /// Create new execution stage with specified config. - pub const fn new( + pub fn new( executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, @@ -95,7 +99,6 @@ impl ExecutionStage { exex_manager_handle: ExExManagerHandle, ) -> Self { Self { - metrics_tx: None, external_clean_threshold, executor_provider, thresholds, @@ -103,6 +106,7 @@ impl ExecutionStage { post_execute_commit_input: None, post_unwind_commit_input: None, exex_manager_handle, + metrics: ExecutorMetrics::default(), } } @@ -135,12 +139,6 @@ impl ExecutionStage { ) } - /// Set the metric events sender. - pub fn with_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { - self.metrics_tx = Some(metrics_tx); - self - } - /// Adjusts the prune modes related to changesets. 
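// --- Illustrative sketch, not part of this diff ------------------------------
// Shape of the `ExecutorMetrics::metered` wrapper the execution stage switches
// to below: it runs the execution closure, times it, and records gas-based
// counters itself, which is what lets the stage drop the old
// `MetricEvent::ExecutionStageGas` channel. Field and method names are
// simplified stand-ins for reth's `ExecutorMetrics`, which takes the block
// execution input rather than a raw gas value.
use std::time::Instant;

#[derive(Default)]
struct SketchMetrics {
    gas_processed_total: u64, // a real version uses metrics counters/histograms
}

impl SketchMetrics {
    fn metered<F, R>(&mut self, gas_used: u64, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let started = Instant::now();
        let out = f();
        let _elapsed = started.elapsed(); // would feed an execution-duration histogram
        self.gas_processed_total += gas_used;
        out
    }
}
// ------------------------------------------------------------------------------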
/// /// This function verifies whether the [`super::MerkleStage`] or Hashing stages will run from @@ -276,12 +274,13 @@ where // Execute the block let execute_start = Instant::now(); - executor.execute_and_verify_one((&block, td, None).into()).map_err(|error| { - StageError::Block { + self.metrics.metered((&block, td, None).into(), |input| { + executor.execute_and_verify_one(input).map_err(|error| StageError::Block { block: Box::new(block.header.clone().seal_slow()), error: BlockErrorKind::Execution(error), - } + }) })?; + execution_duration += execute_start.elapsed(); // Log execution throughput @@ -300,12 +299,6 @@ where last_log_instant = Instant::now(); } - // Gas metrics - if let Some(metrics_tx) = &mut self.metrics_tx { - let _ = - metrics_tx.send(MetricEvent::ExecutionStageGas { gas: block.header.gas_used }); - } - stage_progress = block_number; stage_checkpoint.progress.processed += block.gas_used; @@ -372,7 +365,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); + let mut writer = UnifiedStorageWriter::new(&provider, static_file_producer); writer.write_to_storage(state, OriginalValuesKnown::Yes)?; let db_write_duration = time.elapsed(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 4dd9f8452c..dfc183e263 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -390,12 +390,11 @@ mod tests { use super::*; use crate::test_utils::{TestRunnerError, TestStageDB}; use reth_consensus::test_utils::TestConsensus; - use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; use reth_network_p2p::test_utils::{TestHeaderDownloader, TestHeadersClient}; - use reth_provider::BlockNumReader; + use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader}; use tokio::sync::watch; pub(crate) struct HeadersTestRunner { @@ -427,7 +426,7 @@ mod tests { } impl StageTestRunner for HeadersTestRunner { - type S = HeaderStage>>, D>; + type S = HeaderStage, D>; fn db(&self) -> &TestStageDB { &self.db diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 569dc8cc38..694f3e4722 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -46,9 +46,7 @@ mod tests { use reth_chainspec::ChainSpecBuilder; use reth_db::{ mdbx::{cursor::Cursor, RW}, - tables, - test_utils::TempDatabase, - AccountsHistory, DatabaseEnv, + tables, AccountsHistory, }; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -62,9 +60,10 @@ mod tests { StaticFileSegment, B256, U256, }; use reth_provider::{ - providers::StaticFileWriter, AccountExtReader, BlockReader, DatabaseProviderFactory, - ProviderFactory, ProviderResult, ReceiptProvider, StageCheckpointWriter, - StaticFileProviderFactory, StorageReader, + providers::{StaticFileProvider, StaticFileWriter}, + test_utils::MockNodeTypesWithDB, + AccountExtReader, BlockReader, DatabaseProviderFactory, ProviderFactory, ProviderResult, + ReceiptProvider, StageCheckpointWriter, StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; use reth_stages_api::{ @@ -140,7 +139,7 @@ mod tests { .unwrap(); provider_rw.commit().unwrap(); - let check_pruning = |factory: ProviderFactory>>, + let check_pruning = |factory: ProviderFactory, prune_modes: PruneModes, expect_num_receipts: 
usize, expect_num_acc_changesets: usize, @@ -297,7 +296,10 @@ mod tests { is_full_node: bool, expected: Option, ) { - let static_file_provider = db.factory.static_file_provider(); + // We recreate the static file provider, since consistency heals are done on fetching the + // writer for the first time. + let static_file_provider = + StaticFileProvider::read_write(db.factory.static_file_provider().path()).unwrap(); // Simulate corruption by removing `prune_count` rows from the data file without updating // its offset list and configuration. @@ -312,8 +314,11 @@ mod tests { data_file.get_ref().sync_all().unwrap(); } + // We recreate the static file provider, since consistency heals are done on fetching the + // writer for the first time. assert_eq!( - static_file_provider + StaticFileProvider::read_write(db.factory.static_file_provider().path()) + .unwrap() .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), Ok(expected) ); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 7b162bd16d..407073bcef 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -47,7 +47,7 @@ impl Stage for PruneStage { .delete_limit(self.commit_threshold) .build(provider.static_file_provider().clone()); - let result = pruner.run(provider, input.target())?; + let result = pruner.run_with_provider(&provider.0, input.target())?; if result.progress.is_finished() { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) } else { diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 7e594ca067..ccb0013c5b 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -6,7 +6,7 @@ use reth_db_api::{ database::Database, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{Address, StaticFileSegment, TransactionSignedNoHash, TxNumber}; +use reth_primitives::{Address, GotExpected, StaticFileSegment, TransactionSignedNoHash, TxNumber}; use reth_provider::{ BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, PruneCheckpointReader, StatsReader, @@ -153,23 +153,37 @@ where .unzip(); let static_file_provider = provider.static_file_provider().clone(); - tokio::task::spawn_blocking(move || { + + // We do not use `tokio::task::spawn_blocking` because, during a shutdown, + // there will be a timeout grace period in which Tokio does not allow spawning + // additional blocking tasks. This would cause this function to return + // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. + // + // However, using `std::thread::spawn` allows us to utilize the timeout grace + // period to complete some work without throwing errors during the shutdown. + std::thread::spawn(move || { for (chunk_range, recovered_senders_tx) in chunks { // Read the raw value, and let the rayon worker to decompress & decode. - let chunk = static_file_provider - .fetch_range_with_predicate( - StaticFileSegment::Transactions, - chunk_range.clone(), - |cursor, number| { - Ok(cursor - .get_one::>>( - number.into(), - )? - .map(|tx| (number, tx))) - }, - |_| true, - ) - .expect("failed to fetch range"); + let chunk = match static_file_provider.fetch_range_with_predicate( + StaticFileSegment::Transactions, + chunk_range.clone(), + |cursor, number| { + Ok(cursor + .get_one::>>( + number.into(), + )? 
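// --- Illustrative sketch, not part of this diff ------------------------------
// The producer pattern introduced in the sender-recovery change above: a plain
// OS thread (usable even during Tokio's shutdown grace period, when
// `spawn_blocking` is refused) feeds results down a channel, and any fetch or
// decode error is sent through the same channel so the consumer sees it in
// order while the producer stops early. Types here are illustrative only.
use std::{sync::mpsc, thread};

fn produce(chunks: Vec<Vec<u8>>) -> mpsc::Receiver<Result<u64, String>> {
    let (tx, rx) = mpsc::channel();
    thread::spawn(move || {
        for chunk in chunks {
            let res = decode(&chunk);
            let is_err = res.is_err();
            // Receiver hang-ups are ignored; the consumer decides when to stop.
            let _ = tx.send(res);
            if is_err {
                break; // exit early, mirroring the stage's error handling
            }
        }
    });
    rx
}

fn decode(chunk: &[u8]) -> Result<u64, String> {
    chunk.first().copied().map(u64::from).ok_or_else(|| "empty chunk".to_string())
}
// ------------------------------------------------------------------------------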
+ .map(|tx| (number, tx))) + }, + |_| true, + ) { + Ok(chunk) => chunk, + Err(err) => { + // We exit early since we could not process this chunk. + let _ = recovered_senders_tx + .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); + break + } + }; // Spawn the task onto the global rayon pool // This task will send the results through the channel after it has read the transaction @@ -178,14 +192,28 @@ where let mut rlp_buf = Vec::with_capacity(128); for (number, tx) in chunk { rlp_buf.clear(); - let tx = tx.value().expect("decode error"); - let _ = recovered_senders_tx.send(recover_sender((number, tx), &mut rlp_buf)); + + let res = tx + .value() + .map_err(|err| Box::new(SenderRecoveryStageError::StageError(err.into()))) + .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); + + let is_err = res.is_err(); + + let _ = recovered_senders_tx.send(res); + + // Finish early + if is_err { + break + } } }); } }); debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database"); + + let mut processed_transactions = 0; for channel in receivers { while let Ok(recovered) = channel.recv() { let (tx_id, sender) = match recovered { @@ -212,14 +240,33 @@ where }) } SenderRecoveryStageError::StageError(err) => Err(err), + SenderRecoveryStageError::RecoveredSendersMismatch(expectation) => { + Err(StageError::Fatal( + SenderRecoveryStageError::RecoveredSendersMismatch(expectation) + .into(), + )) + } } } }; senders_cursor.append(tx_id, sender)?; + processed_transactions += 1; } } debug!(target: "sync::stages::sender_recovery", ?tx_range, "Finished recovering senders batch"); + // Fail safe to ensure that we do not proceed without having recovered all senders. + let expected = tx_range.end - tx_range.start; + if processed_transactions != expected { + return Err(StageError::Fatal( + SenderRecoveryStageError::RecoveredSendersMismatch(GotExpected { + got: processed_transactions, + expected, + }) + .into(), + )); + } + Ok(()) } @@ -266,6 +313,10 @@ enum SenderRecoveryStageError { #[error(transparent)] FailedRecovery(#[from] FailedSenderRecoveryError), + /// Number of recovered senders does not match + #[error("mismatched sender count during recovery: {_0}")] + RecoveredSendersMismatch(GotExpected), + /// A different type of stage error occurred #[error(transparent)] StageError(#[from] StageError), diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 4480e7a235..d896346059 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,9 +1,7 @@ use reth_chainspec::MAINNET; use reth_db::{ tables, - test_utils::{ - create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir, TempDatabase, - }, + test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, DatabaseEnv, }; use reth_db_api::{ @@ -21,17 +19,18 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + test_utils::MockNodeTypesWithDB, HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; -use std::{collections::BTreeMap, path::Path, sync::Arc}; +use std::{collections::BTreeMap, path::Path}; use tempfile::TempDir; /// Test database that is used for testing stage implementations. 
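// --- Illustrative sketch, not part of this diff ------------------------------
// The fail-safe added above, in isolation: count every sender drained from the
// result channels and refuse to finish the batch when the count differs from
// the requested transaction range, instead of silently committing a partial
// batch. Mirrors the `RecoveredSendersMismatch(GotExpected)` check.
#[derive(Debug)]
struct GotExpectedSketch {
    got: u64,
    expected: u64,
}

fn check_complete(range: std::ops::Range<u64>, processed: u64) -> Result<(), GotExpectedSketch> {
    let expected = range.end - range.start;
    if processed != expected {
        // Surfaced as a fatal stage error in the real code.
        return Err(GotExpectedSketch { got: processed, expected });
    }
    Ok(())
}
// ------------------------------------------------------------------------------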
#[derive(Debug)] pub struct TestStageDB { - pub factory: ProviderFactory>>, + pub factory: ProviderFactory, pub temp_static_files_dir: TempDir, } diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 1a1921d58c..8fa89e12e0 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true @@ -22,6 +23,7 @@ reth-tokio-util.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true +reth-node-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 3212c0cd88..54d5bee65c 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,10 +1,10 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - DatabaseProviderRO, + DBProvider, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; @@ -14,14 +14,14 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } fn copy_to_static_files( &self, - provider: DatabaseProviderRO, + provider: Provider, static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index fcbd19c8a3..a5c0b249e1 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -13,14 +13,13 @@ mod sidecars; pub use sidecars::Sidecars; use alloy_primitives::BlockNumber; -use reth_db_api::database::Database; -use reth_provider::{providers::StaticFileProvider, DatabaseProviderRO}; +use reth_provider::providers::StaticFileProvider; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; /// A segment represents moving some portion of the data to static files. -pub trait Segment: Send + Sync { +pub trait Segment: Send + Sync { /// Returns the [`StaticFileSegment`]. fn segment(&self) -> StaticFileSegment; @@ -28,7 +27,7 @@ pub trait Segment: Send + Sync { /// the management of and writing to files. 
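// --- Illustrative sketch, not part of this diff ------------------------------
// What the `DatabaseProviderRO<DB>` -> generic `Provider` change in these
// segment files buys: segment code is written against narrow capability traits
// rather than one concrete read-only provider type, so anything exposing a
// transaction (and, where needed, block reads) can drive it. The traits below
// are simplified stand-ins for reth's `DBProvider`/`DbTx`.
trait SketchTx {
    fn get(&self, key: u64) -> Option<Vec<u8>>;
}

trait SketchDbProvider {
    type Tx: SketchTx;
    fn tx_ref(&self) -> &Self::Tx;
}

fn copy_rows<Provider: SketchDbProvider>(provider: &Provider, keys: &[u64]) -> Vec<Vec<u8>> {
    // Only the transaction capability is needed, not a concrete provider type.
    keys.iter().filter_map(|k| provider.tx_ref().get(*k)).collect()
}
// ------------------------------------------------------------------------------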
fn copy_to_static_files( &self, - provider: DatabaseProviderRO, + provider: Provider, static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()>; diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index b63d083a06..4e2185a598 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -1,10 +1,10 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, + BlockReader, DBProvider, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,14 +14,14 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Receipts; -impl Segment for Receipts { +impl Segment for Receipts { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Receipts } fn copy_to_static_files( &self, - provider: DatabaseProviderRO, + provider: Provider, static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/static-file/static-file/src/segments/sidecars.rs b/crates/static-file/static-file/src/segments/sidecars.rs index 2d82c03179..7e69355c0e 100644 --- a/crates/static-file/static-file/src/segments/sidecars.rs +++ b/crates/static-file/static-file/src/segments/sidecars.rs @@ -1,10 +1,10 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - DatabaseProviderRO, + BlockReader, DBProvider, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; @@ -14,14 +14,14 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Sidecars; -impl Segment for Sidecars { +impl Segment for Sidecars { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Sidecars } fn copy_to_static_files( &self, - provider: DatabaseProviderRO, + provider: Provider, static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index ac690def4b..52e0ca8b57 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,10 +1,10 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, + BlockReader, DBProvider, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,7 +14,7 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Transactions; -impl Segment for Transactions { +impl Segment for Transactions { fn segment(&self) -> StaticFileSegment { 
StaticFileSegment::Transactions } @@ -23,7 +23,7 @@ impl Segment for Transactions { /// [`StaticFileSegment::Transactions`] for the provided block range. fn copy_to_static_files( &self, - provider: DatabaseProviderRO, + provider: Provider, static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 281fc457e8..14377c25a0 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -4,10 +4,9 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent}; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; -use reth_db_api::database::Database; use reth_provider::{ - providers::StaticFileWriter, ProviderFactory, StageCheckpointReader as _, - StaticFileProviderFactory, + providers::StaticFileWriter, BlockReader, DBProvider, DatabaseProviderFactory, + StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; @@ -25,22 +24,29 @@ use tracing::{debug, trace}; pub type StaticFileProducerResult = ProviderResult; /// The [`StaticFileProducer`] instance itself with the result of [`StaticFileProducerInner::run`] -pub type StaticFileProducerWithResult = (StaticFileProducer, StaticFileProducerResult); +pub type StaticFileProducerWithResult = + (StaticFileProducer, StaticFileProducerResult); /// Static File producer. It's a wrapper around [`StaticFileProducer`] that allows to share it /// between threads. -#[derive(Debug, Clone)] -pub struct StaticFileProducer(Arc>>); +#[derive(Debug)] +pub struct StaticFileProducer(Arc>>); -impl StaticFileProducer { +impl StaticFileProducer { /// Creates a new [`StaticFileProducer`]. - pub fn new(provider_factory: ProviderFactory, prune_modes: PruneModes) -> Self { - Self(Arc::new(Mutex::new(StaticFileProducerInner::new(provider_factory, prune_modes)))) + pub fn new(provider: Provider, prune_modes: PruneModes) -> Self { + Self(Arc::new(Mutex::new(StaticFileProducerInner::new(provider, prune_modes)))) + } +} + +impl Clone for StaticFileProducer { + fn clone(&self) -> Self { + Self(self.0.clone()) } } -impl Deref for StaticFileProducer { - type Target = Arc>>; +impl Deref for StaticFileProducer { + type Target = Arc>>; fn deref(&self) -> &Self::Target { &self.0 @@ -50,9 +56,9 @@ impl Deref for StaticFileProducer { /// Static File producer routine. See [`StaticFileProducerInner::run`] for more detailed /// description. #[derive(Debug)] -pub struct StaticFileProducerInner { +pub struct StaticFileProducerInner { /// Provider factory - provider_factory: ProviderFactory, + provider: Provider, /// Pruning configuration for every part of the data that can be pruned. Set by user, and /// needed in [`StaticFileProducerInner`] to prevent attempting to move prunable data to static /// files. See [`StaticFileProducerInner::get_static_file_targets`]. 
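// --- Illustrative sketch, not part of this diff ------------------------------
// Why `Clone` is now implemented by hand for `StaticFileProducer` above:
// `#[derive(Clone)]` on a wrapper like `Producer<Provider>(Arc<...>)` adds an
// unwanted `Provider: Clone` bound, while cloning only the `Arc` handle never
// needs it. Names are illustrative.
use std::sync::{Arc, Mutex};

struct InnerSketch<Provider> {
    provider: Provider,
}

struct ProducerSketch<Provider>(Arc<Mutex<InnerSketch<Provider>>>);

impl<Provider> Clone for ProducerSketch<Provider> {
    fn clone(&self) -> Self {
        // Clone the shared handle, not the provider itself.
        Self(self.0.clone())
    }
}
// ------------------------------------------------------------------------------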
@@ -99,11 +105,17 @@ impl StaticFileTargets { } } -impl StaticFileProducerInner { - fn new(provider_factory: ProviderFactory, prune_modes: PruneModes) -> Self { - Self { provider_factory, prune_modes, event_sender: Default::default() } +impl StaticFileProducerInner { + fn new(provider: Provider, prune_modes: PruneModes) -> Self { + Self { provider, prune_modes, event_sender: Default::default() } } +} +impl StaticFileProducerInner +where + Provider: StaticFileProviderFactory + + DatabaseProviderFactory, +{ /// Listen for events on the `static_file_producer`. pub fn events(&self) -> EventStream { self.event_sender.new_listener() @@ -113,8 +125,8 @@ impl StaticFileProducerInner { /// /// For each [Some] target in [`StaticFileTargets`], initializes a corresponding [Segment] and /// runs it with the provided block range using [`reth_provider::providers::StaticFileProvider`] - /// and a read-only database transaction from [`ProviderFactory`]. All segments are run in - /// parallel. + /// and a read-only database transaction from [`DatabaseProviderFactory`]. All segments are run + /// in parallel. /// /// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic /// lives in the `prune` crate. @@ -125,7 +137,7 @@ impl StaticFileProducerInner { } debug_assert!(targets.is_contiguous_to_highest_static_files( - self.provider_factory.static_file_provider().get_highest_static_files() + self.provider.static_file_provider().get_highest_static_files() )); self.event_sender.notify(StaticFileProducerEvent::Started { targets: targets.clone() }); @@ -133,7 +145,8 @@ impl StaticFileProducerInner { debug!(target: "static_file", ?targets, "StaticFileProducer started"); let start = Instant::now(); - let mut segments = Vec::<(Box>, RangeInclusive)>::new(); + let mut segments = + Vec::<(Box>, RangeInclusive)>::new(); if let Some(block_range) = targets.transactions.clone() { segments.push((Box::new(segments::Transactions), block_range)); @@ -154,8 +167,8 @@ impl StaticFileProducerInner { // Create a new database transaction on every segment to prevent long-lived read-only // transactions - let provider = self.provider_factory.provider()?.disable_long_read_transaction_safety(); - segment.copy_to_static_files(provider, self.provider_factory.static_file_provider(), block_range.clone())?; + let provider = self.provider.database_provider_ro()?.disable_long_read_transaction_safety(); + segment.copy_to_static_files(provider, self.provider.static_file_provider(), block_range.clone())?; let elapsed = start.elapsed(); // TODO(alexey): track in metrics debug!(target: "static_file", segment = %segment.segment(), ?block_range, ?elapsed, "Finished StaticFileProducer segment"); @@ -163,9 +176,9 @@ impl StaticFileProducerInner { Ok(()) })?; - self.provider_factory.static_file_provider().commit()?; + self.provider.static_file_provider().commit()?; for (segment, block_range) in segments { - self.provider_factory + self.provider .static_file_provider() .update_index(segment.segment(), Some(*block_range.end()))?; } @@ -184,7 +197,7 @@ impl StaticFileProducerInner { /// /// Returns highest block numbers for all static file segments. 
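// --- Illustrative sketch, not part of this diff ------------------------------
// The run loop above in miniature: segments are processed in parallel with
// rayon, and each worker opens its own short-lived read-only provider so no
// long-lived read transaction pins the database. All names are illustrative.
use rayon::prelude::*;

fn run_segments(ranges: Vec<(String, std::ops::RangeInclusive<u64>)>) -> Result<(), String> {
    ranges.par_iter().try_for_each(|(segment, block_range)| {
        // One fresh read-only "provider" (here just a label) per segment.
        let provider = open_read_only()?;
        copy_segment(&provider, segment, block_range)
    })
}

fn open_read_only() -> Result<String, String> {
    Ok("ro-provider".to_string())
}

fn copy_segment(
    _provider: &str,
    _segment: &str,
    _range: &std::ops::RangeInclusive<u64>,
) -> Result<(), String> {
    Ok(())
}
// ------------------------------------------------------------------------------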
pub fn copy_to_static_files(&self) -> ProviderResult { - let provider = self.provider_factory.provider()?; + let provider = self.provider.database_provider_ro()?; let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] .into_iter() .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) @@ -209,8 +222,7 @@ impl StaticFileProducerInner { &self, finalized_block_numbers: HighestStaticFiles, ) -> ProviderResult { - let highest_static_files = - self.provider_factory.static_file_provider().get_highest_static_files(); + let highest_static_files = self.provider.static_file_provider().get_highest_static_files(); let targets = StaticFileTargets { headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { @@ -269,10 +281,10 @@ mod tests { }; use alloy_primitives::{B256, U256}; use assert_matches::assert_matches; - use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{database::Database, transaction::DbTx}; use reth_provider::{ - providers::StaticFileWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, + providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, ProviderError, + ProviderFactory, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages::test_utils::{StorageKind, TestStageDB}; @@ -280,13 +292,10 @@ mod tests { use reth_testing_utils::generators::{ self, random_block_range, random_receipt, BlockRangeParams, }; - use std::{ - sync::{mpsc::channel, Arc}, - time::Duration, - }; + use std::{sync::mpsc::channel, time::Duration}; use tempfile::TempDir; - fn setup() -> (ProviderFactory>>, TempDir) { + fn setup() -> (ProviderFactory, TempDir) { let mut rng = generators::rng(); let db = TestStageDB::default(); diff --git a/crates/static-file/types/src/filters.rs b/crates/static-file/types/src/filters.rs deleted file mode 100644 index b6935fbb51..0000000000 --- a/crates/static-file/types/src/filters.rs +++ /dev/null @@ -1,38 +0,0 @@ -use strum::AsRefStr; - -#[derive(Debug, Copy, Clone)] -/// Static File filters. -pub enum Filters { - /// Static File uses filters with [`InclusionFilter`] and [`PerfectHashingFunction`]. - WithFilters(InclusionFilter, PerfectHashingFunction), - /// Static File doesn't use any filters. - WithoutFilters, -} - -impl Filters { - /// Returns `true` if static file uses filters. - pub const fn has_filters(&self) -> bool { - matches!(self, Self::WithFilters(_, _)) - } -} - -#[derive(Debug, Copy, Clone, AsRefStr)] -#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] -/// Static File inclusion filter. Also see [Filters]. -pub enum InclusionFilter { - #[strum(serialize = "cuckoo")] - /// Cuckoo filter - Cuckoo, -} - -#[derive(Debug, Copy, Clone, AsRefStr)] -#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] -/// Static File perfect hashing function. Also see [Filters]. 
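// --- Illustrative sketch, not part of this diff ------------------------------
// How `get_static_file_targets` above turns checkpoints into work, under the
// assumption (from the surrounding code) that each segment may move data up to
// its stage checkpoint: the next block after the highest static-file block up
// to the finalized/checkpoint block forms an inclusive target range, or `None`
// when static files already cover it. Simplified stand-in, not the reth API.
fn target_range(
    highest_static_file: Option<u64>,
    finalized: Option<u64>,
) -> Option<std::ops::RangeInclusive<u64>> {
    let finalized = finalized?;
    let start = highest_static_file.map_or(0, |h| h + 1);
    // Nothing to do if static files already reach the finalized block.
    (start <= finalized).then(|| start..=finalized)
}
// ------------------------------------------------------------------------------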
-pub enum PerfectHashingFunction { - #[strum(serialize = "fmph")] - /// Fingerprint-Based Minimal Perfect Hash Function - Fmph, - #[strum(serialize = "gofmph")] - /// Fingerprint-Based Minimal Perfect Hash Function with Group Optimization - GoFmph, -} diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 15feafd180..c74bfc73f1 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -9,12 +9,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; -mod filters; mod segment; use alloy_primitives::BlockNumber; pub use compression::Compression; -pub use filters::{Filters, InclusionFilter, PerfectHashingFunction}; pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; /// Default static file block count. diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index ba2b2c98f9..8ca1cc2e06 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -1,4 +1,4 @@ -use crate::{BlockNumber, Compression, Filters, InclusionFilter}; +use crate::{BlockNumber, Compression}; use alloy_primitives::TxNumber; use derive_more::Display; use serde::{Deserialize, Serialize}; @@ -52,17 +52,7 @@ impl StaticFileSegment { /// Returns the default configuration of the segment. pub const fn config(&self) -> SegmentConfig { - let default_config = SegmentConfig { - filters: Filters::WithFilters( - InclusionFilter::Cuckoo, - super::PerfectHashingFunction::Fmph, - ), - compression: Compression::Lz4, - }; - - match self { - Self::Headers | Self::Transactions | Self::Receipts | Self::Sidecars => default_config, - } + SegmentConfig { compression: Compression::Lz4 } } /// Returns the number of columns for the segment @@ -84,18 +74,12 @@ impl StaticFileSegment { /// Returns file name for the provided segment and range, alongside filters, compression. pub fn filename_with_configuration( &self, - filters: Filters, compression: Compression, block_range: &SegmentRangeInclusive, ) -> String { let prefix = self.filename(block_range); - let filters_name = match filters { - Filters::WithFilters(inclusion_filter, phf) => { - format!("{}-{}", inclusion_filter.as_ref(), phf.as_ref()) - } - Filters::WithoutFilters => "none".to_string(), - }; + let filters_name = "none".to_string(); // ATTENTION: if changing the name format, be sure to reflect those changes in // [`Self::parse_filename`.] @@ -316,8 +300,6 @@ impl SegmentHeader { /// Configuration used on the segment. 
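// --- Illustrative sketch, not part of this diff ------------------------------
// With filters removed, the on-disk name keeps its five-part layout but the
// filter slot is always the literal "none", as the updated test vectors below
// show (e.g. "static_file_headers_2_30_none_lz4"). A minimal rendering of the
// format, with illustrative parameter names:
fn filename_with_configuration_sketch(
    segment: &str,     // e.g. "headers"
    start: u64,
    end: u64,
    compression: &str, // e.g. "lz4", "zstd", "zstd-dict"
) -> String {
    let prefix = format!("static_file_{segment}_{start}_{end}");
    let filters_name = "none";
    format!("{prefix}_{filters_name}_{compression}")
}
// ------------------------------------------------------------------------------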
#[derive(Debug, Clone, Copy)] pub struct SegmentConfig { - /// Inclusion filters used on the segment - pub filters: Filters, /// Compression used on the segment pub compression: Compression, } @@ -390,46 +372,28 @@ mod tests { ( StaticFileSegment::Headers, 2..=30, - "static_file_headers_2_30_cuckoo-fmph_lz4", - Some(( - Compression::Lz4, - Filters::WithFilters( - InclusionFilter::Cuckoo, - crate::PerfectHashingFunction::Fmph, - ), - )), + "static_file_headers_2_30_none_lz4", + Some(Compression::Lz4), ), ( StaticFileSegment::Headers, 2..=30, - "static_file_headers_2_30_cuckoo-fmph_zstd", - Some(( - Compression::Zstd, - Filters::WithFilters( - InclusionFilter::Cuckoo, - crate::PerfectHashingFunction::Fmph, - ), - )), + "static_file_headers_2_30_none_zstd", + Some(Compression::Zstd), ), ( StaticFileSegment::Headers, 2..=30, - "static_file_headers_2_30_cuckoo-fmph_zstd-dict", - Some(( - Compression::ZstdWithDictionary, - Filters::WithFilters( - InclusionFilter::Cuckoo, - crate::PerfectHashingFunction::Fmph, - ), - )), + "static_file_headers_2_30_none_zstd-dict", + Some(Compression::ZstdWithDictionary), ), ]; - for (segment, block_range, filename, configuration) in test_vectors { + for (segment, block_range, filename, compression) in test_vectors { let block_range: SegmentRangeInclusive = block_range.into(); - if let Some((compression, filters)) = configuration { + if let Some(compression) = compression { assert_eq!( - segment.filename_with_configuration(filters, compression, &block_range,), + segment.filename_with_configuration(compression, &block_range), filename ); } else { diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 7ecf75208a..1c47b44290 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -16,11 +16,14 @@ reth-codecs-derive = { path = "./derive", default-features = false } # eth alloy-consensus = { workspace = true, optional = true } -alloy-eips = { workspace = true, optional = true } +alloy-eips = { workspace = true, optional = true, features = ["serde"] } alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-trie = { workspace = true, optional = true } +# optimism +op-alloy-consensus = { workspace = true, optional = true } + # misc bytes.workspace = true modular-bitfield = { workspace = true, optional = true } @@ -31,7 +34,11 @@ alloy-eips = { workspace = true, default-features = false, features = [ "arbitrary", "serde", ] } -alloy-primitives = { workspace = true, features = ["arbitrary", "serde", "rand"] } +alloy-primitives = { workspace = true, features = [ + "arbitrary", + "serde", + "rand", +] } alloy-consensus = { workspace = true, features = ["arbitrary"] } test-fuzz.workspace = true serde_json.workspace = true @@ -51,3 +58,4 @@ alloy = [ "dep:alloy-trie", "dep:serde" ] +optimism = ["alloy", "dep:op-alloy-consensus"] diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 4feae63c4f..8aa44062e2 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -27,7 +27,7 @@ pub fn maybe_generate_tests( let mut buf = vec![]; let len = field.clone().to_compact(&mut buf); let (decoded, _): (super::#type_ident, _) = Compact::from_compact(&buf, len); - assert!(field == decoded, "maybe_generate_tests::compact"); + assert_eq!(field, decoded, "maybe_generate_tests::compact"); } }); } else if arg.to_string() == "rlp" { diff --git 
a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index 306ef4f494..306b64d7e4 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -1,10 +1,8 @@ use crate::Compact; +use alloc::vec::Vec; use alloy_eips::eip2930::{AccessList, AccessListItem}; use alloy_primitives::Address; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Implement `Compact` for `AccessListItem` and `AccessList`. impl Compact for AccessListItem { fn to_compact(&self, buf: &mut B) -> usize diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index 463c94fa3a..84c8ee77a3 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -1,12 +1,10 @@ use crate::Compact; +use alloc::vec::Vec; use alloy_genesis::GenesisAccount as AlloyGenesisAccount; use alloy_primitives::{Bytes, B256, U256}; use reth_codecs_derive::add_arbitrary_tests; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// `GenesisAccount` acts as bridge which simplifies Compact implementation for /// `AlloyGenesisAccount`. /// diff --git a/crates/storage/codecs/src/alloy/log.rs b/crates/storage/codecs/src/alloy/log.rs index 2a3aa07b70..7f0a0e3625 100644 --- a/crates/storage/codecs/src/alloy/log.rs +++ b/crates/storage/codecs/src/alloy/log.rs @@ -1,12 +1,10 @@ //! Native Compact codec impl for primitive alloy log types. use crate::Compact; +use alloc::vec::Vec; use alloy_primitives::{Address, Bytes, Log, LogData}; use bytes::BufMut; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Implement `Compact` for `LogData` and `Log`. impl Compact for LogData { fn to_compact(&self, buf: &mut B) -> usize diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 41a5ba20e2..efd76b1075 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -3,6 +3,7 @@ mod authorization_list; mod genesis_account; mod log; mod request; +mod transaction; mod trie; mod txkind; mod withdrawal; diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs new file mode 100644 index 0000000000..a088949258 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -0,0 +1,67 @@ +use crate::Compact; +use alloy_consensus::TxEip1559 as AlloyTxEip1559; +use alloy_eips::eip2930::AccessList; +use alloy_primitives::{Bytes, ChainId, TxKind, U256}; +use serde::{Deserialize, Serialize}; + +/// [EIP-1559 Transaction](https://eips.ethereum.org/EIPS/eip-1559) +/// +/// This is a helper type to use derive on it instead of manually managing `bitfield`. +/// +/// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive +/// will automatically apply to this type. 
+/// +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip1559`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default, Serialize, Deserialize)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +pub(crate) struct TxEip1559 { + chain_id: ChainId, + nonce: u64, + gas_limit: u64, + max_fee_per_gas: u128, + max_priority_fee_per_gas: u128, + to: TxKind, + value: U256, + access_list: AccessList, + input: Bytes, +} + +impl Compact for AlloyTxEip1559 { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxEip1559 { + chain_id: self.chain_id, + nonce: self.nonce, + gas_limit: self.gas_limit as u64, + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + to: self.to, + value: self.value, + access_list: self.access_list.clone(), + input: self.input.clone(), + }; + + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxEip1559::from_compact(buf, len); + + let alloy_tx = Self { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_limit: tx.gas_limit.into(), + max_fee_per_gas: tx.max_fee_per_gas, + max_priority_fee_per_gas: tx.max_priority_fee_per_gas, + to: tx.to, + value: tx.value, + access_list: tx.access_list, + input: tx.input, + }; + + (alloy_tx, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs new file mode 100644 index 0000000000..33b58dfff7 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -0,0 +1,62 @@ +use crate::Compact; +use alloy_consensus::transaction::TxEip2930 as AlloyTxEip2930; +use alloy_eips::eip2930::AccessList; +use alloy_primitives::{Bytes, ChainId, TxKind, U256}; +use reth_codecs_derive::add_arbitrary_tests; +use serde::{Deserialize, Serialize}; + +/// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). +/// +/// This is a helper type to use derive on it instead of manually managing `bitfield`. +/// +/// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive +/// will automatically apply to this type. 
+/// +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip2930`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub(crate) struct TxEip2930 { + chain_id: ChainId, + nonce: u64, + gas_price: u128, + gas_limit: u64, + to: TxKind, + value: U256, + access_list: AccessList, + input: Bytes, +} + +impl Compact for AlloyTxEip2930 { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxEip2930 { + chain_id: self.chain_id, + nonce: self.nonce, + gas_price: self.gas_price, + gas_limit: self.gas_limit as u64, + to: self.to, + value: self.value, + access_list: self.access_list.clone(), + input: self.input.clone(), + }; + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxEip2930::from_compact(buf, len); + let alloy_tx = Self { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_price: tx.gas_price, + gas_limit: tx.gas_limit as u128, + to: tx.to, + value: tx.value, + access_list: tx.access_list, + input: tx.input, + }; + (alloy_tx, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs new file mode 100644 index 0000000000..15a5f443c4 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -0,0 +1,94 @@ +use crate::{Compact, CompactPlaceholder}; +use alloc::vec::Vec; +use alloy_consensus::transaction::TxEip4844 as AlloyTxEip4844; +use alloy_eips::eip2930::AccessList; +use alloy_primitives::{Address, Bytes, ChainId, B256, U256}; +use reth_codecs_derive::add_arbitrary_tests; +use serde::{Deserialize, Serialize}; + +/// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) +/// +/// This is a helper type to use derive on it instead of manually managing `bitfield`. +/// +/// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive +/// will automatically apply to this type. +/// +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip4844`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub(crate) struct TxEip4844 { + chain_id: ChainId, + nonce: u64, + gas_limit: u64, + max_fee_per_gas: u128, + max_priority_fee_per_gas: u128, + /// TODO(debt): this should be removed if we break the DB. 
+ /// Makes sure that the Compact bitflag struct has one bit after the above field: + /// + placeholder: Option, + to: Address, + value: U256, + access_list: AccessList, + blob_versioned_hashes: Vec, + max_fee_per_blob_gas: u128, + input: Bytes, +} + +impl Compact for AlloyTxEip4844 { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxEip4844 { + chain_id: self.chain_id, + nonce: self.nonce, + gas_limit: self.gas_limit as u64, + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + placeholder: Some(()), + to: self.to, + value: self.value, + access_list: self.access_list.clone(), + blob_versioned_hashes: self.blob_versioned_hashes.clone(), + max_fee_per_blob_gas: self.max_fee_per_blob_gas, + input: self.input.clone(), + }; + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxEip4844::from_compact(buf, len); + let alloy_tx = Self { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_limit: tx.gas_limit as u128, + max_fee_per_gas: tx.max_fee_per_gas, + max_priority_fee_per_gas: tx.max_priority_fee_per_gas, + to: tx.to, + value: tx.value, + access_list: tx.access_list, + blob_versioned_hashes: tx.blob_versioned_hashes, + max_fee_per_blob_gas: tx.max_fee_per_blob_gas, + input: tx.input, + }; + (alloy_tx, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{address, bytes}; + + #[test] + fn backwards_compatible_txkind_test() { + // TxEip4844 encoded with TxKind on to field + // holesky tx hash: <0xa3b1668225bf0fbfdd6c19aa6fd071fa4ff5d09a607c67ccd458b97735f745ac> + let tx = bytes!("224348a100426844cb2dc6c0b2d05e003b9aca0079c9109b764609df928d16fc4a91e9081f7e87db09310001019101fb28118ceccaabca22a47e35b9c3f12eb2dcb25e5c543d5b75e6cd841f0a05328d26ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000
000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); + let (tx, _) = TxEip4844::from_compact(&tx, tx.len()); + assert_eq!(tx.to, address!("79C9109b764609df928d16fC4a91e9081F7e87DB")); + assert_eq!(tx.placeholder, Some(())); + assert_eq!(tx.input, bytes!("ef16e8450000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000052000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000007b399987d24fc5951f3e94a4cb16e87414bf22290000000000000000000000001670090000000000000000000000000000010001302e31382e302d64657600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009e640a6aadf4f664cf467b795c31332f44acbe6c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006614c2d1000000000000000000000000000000000000000000000000000000000014012c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000093100000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041f06fd78f4dcdf089263524731620941747b9b93fd8f631557e25b23845a78b685bd82f9d36bce2f4cc812b6e5191df52479d349089461ffe76e9f2fa2848a0fe1b0000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000410819f04aba17677807c61ae72afdddf7737f26931ecfa8af05b7c669808b36a2587e32c90bb0ed2100266dd7797c80121a109a2b0fe941ca5a580e438988cac81c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")); + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs new file mode 100644 index 0000000000..a44e97ee1d --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -0,0 +1,69 @@ +use crate::Compact; +use alloc::vec::Vec; +use alloy_consensus::transaction::TxEip7702 as AlloyTxEip7702; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use alloy_primitives::{Address, Bytes, ChainId, U256}; +use reth_codecs_derive::add_arbitrary_tests; +use serde::{Deserialize, Serialize}; + +/// [EIP-7702 Set Code Transaction](https://eips.ethereum.org/EIPS/eip-7702) +/// +/// This is a helper type to use derive on it instead of manually managing `bitfield`. +/// +/// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive +/// will automatically apply to this type. +/// +/// Notice: Make sure this struct is 1:1 with [`alloy_consensus::transaction::TxEip7702`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub(crate) struct TxEip7702 { + chain_id: ChainId, + nonce: u64, + gas_limit: u64, + max_fee_per_gas: u128, + max_priority_fee_per_gas: u128, + to: Address, + value: U256, + access_list: AccessList, + authorization_list: Vec, + input: Bytes, +} + +impl Compact for AlloyTxEip7702 { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxEip7702 { + chain_id: self.chain_id, + nonce: self.nonce, + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + gas_limit: self.gas_limit as u64, + to: self.to, + value: self.value, + input: self.input.clone(), + access_list: self.access_list.clone(), + authorization_list: self.authorization_list.clone(), + }; + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxEip7702::from_compact(buf, len); + let alloy_tx = Self { + chain_id: tx.chain_id, + nonce: tx.nonce, + max_fee_per_gas: tx.max_fee_per_gas, + max_priority_fee_per_gas: tx.max_priority_fee_per_gas, + gas_limit: tx.gas_limit as u128, + to: tx.to, + value: tx.value, + input: tx.input, + access_list: tx.access_list, + authorization_list: tx.authorization_list, + }; + (alloy_tx, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs new file mode 100644 index 0000000000..641b27bf53 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -0,0 +1,78 @@ +use crate::Compact; +use alloy_consensus::TxLegacy as AlloyTxLegacy; +use alloy_primitives::{Bytes, ChainId, TxKind, U256}; +use serde::{Deserialize, Serialize}; + +/// Legacy transaction. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Compact, Serialize, Deserialize)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +pub(crate) struct TxLegacy { + /// Added as EIP-155: Simple replay attack protection + chain_id: Option<ChainId>, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + nonce: u64, + /// A scalar value equal to the number of + /// Wei to be paid per unit of gas for all computation + /// costs incurred as a result of the execution of this transaction; formally Tp. + /// + /// As the total ether supply is around 120 million ETH as of 2022, i.e. roughly + /// 120000000000000000000000000 wei, u128 is safe to use here, since its maximum value is + /// 340282366920938463463374607431768211455. + gas_price: u128, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + gas_limit: u64, + /// The 160-bit address of the message call’s recipient or, for a contract creation + /// transaction, ∅, used here to denote the only member of B_0; formally Tt. + to: TxKind, + /// A scalar value equal to the number of Wei to + /// be transferred to the message call’s recipient or, + /// in the case of contract creation, as an endowment + /// to the newly created account; formally Tv. + value: U256, + /// Input has two uses, depending on whether the transaction is a Create or a Call (i.e. + /// whether the `to` field is None or Some): init, an unlimited size byte array specifying + /// the EVM code for the account initialisation procedure CREATE, or data, an unlimited size + /// byte array specifying the input data of the message call; formally Td. + input: Bytes, +} + +impl Compact for AlloyTxLegacy { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxLegacy { + chain_id: self.chain_id, + nonce: self.nonce, + gas_price: self.gas_price, + gas_limit: self.gas_limit as u64, + to: self.to, + value: self.value, + input: self.input.clone(), + }; + + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxLegacy::from_compact(buf, len); + + let alloy_tx = Self { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_price: tx.gas_price, + gas_limit: tx.gas_limit.into(), + to: tx.to, + value: tx.value, + input: tx.input, + }; + + (alloy_tx, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs new file mode 100644 index 0000000000..717f918fb1 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -0,0 +1,39 @@ +mod eip1559; +mod eip2930; +mod eip4844; +mod eip7702; +mod legacy; +#[cfg(feature = "optimism")] +mod optimism; + +#[cfg(test)] +mod tests { + + // Each value in the database has an extra field named flags that encodes metadata about + // other fields in the value, e.g. offset and length. + // + // This check is to ensure we do not inadvertently add too many fields to a struct, which + // would expand the flags field and break backwards compatibility. + + #[cfg(feature = "optimism")] + use crate::alloy::transaction::optimism::TxDeposit; + use crate::alloy::transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }; + + #[test] + fn test_ensure_backwards_compatibility() { + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip7702::bitflag_encoded_bytes(), 4); + } + + #[cfg(feature = "optimism")] + #[test] + fn test_ensure_backwards_compatibility_optimism() { + assert_eq!(TxDeposit::bitflag_encoded_bytes(), 2); + } +}
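The `bitflag_encoded_bytes` assertions above pin the byte width of the flags prefix that the derived `Compact` encoding writes before the field data. As a back-of-the-envelope sketch of why adding fields is risky (the bit accounting below is illustrative; the derive macro owns the real layout):

```rust
// Illustrative only: the Compact derive writes a small flags bitfield
// before the field data (rounded up to whole bytes), spending a few bits
// per field, e.g. one bit for whether an Option is Some, or several bits
// recording how many bytes a variable-length integer needed.
fn flags_bytes(flag_bits: usize) -> usize {
    flag_bits.div_ceil(8)
}

fn main() {
    // TxLegacy above pins 3 flag bytes; any field set needing 17..=24
    // flag bits lands in that bucket:
    assert_eq!(flags_bytes(17), 3);
    // One extra field can tip the count over a byte boundary, growing the
    // prefix and shifting every previously written value:
    assert_eq!(flags_bytes(25), 4);
}
```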
diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs new file mode 100644 index 0000000000..c84b19559f --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -0,0 +1,61 @@ +use crate::Compact; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; +use op_alloy_consensus::TxDeposit as AlloyTxDeposit; +use reth_codecs_derive::add_arbitrary_tests; +use serde::{Deserialize, Serialize}; + +/// Deposit transactions, also known as deposits, are initiated on L1 and executed on L2. +/// +/// This is a helper type to use derive on it instead of manually managing `bitfield`. +/// +/// By deriving `Compact` here, any future changes or enhancements to the `Compact` derive +/// will automatically apply to this type. +/// +/// Notice: Make sure this struct is 1:1 with [`op_alloy_consensus::TxDeposit`] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub(crate) struct TxDeposit { + source_hash: B256, + from: Address, + to: TxKind, + mint: Option<u128>, + value: U256, + gas_limit: u64, + is_system_transaction: bool, + input: Bytes, +} + +impl Compact for AlloyTxDeposit { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx = TxDeposit { + source_hash: self.source_hash, + from: self.from, + to: self.to, + mint: self.mint, + value: self.value, + gas_limit: self.gas_limit as u64, + is_system_transaction: self.is_system_transaction, + input: self.input.clone(), + }; + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, _) = TxDeposit::from_compact(buf, len); + let alloy_tx = Self { + source_hash: tx.source_hash, + from: tx.from, + to: tx.to, + mint: tx.mint, + value: tx.value, + gas_limit: tx.gas_limit as u128, + is_system_transaction: tx.is_system_transaction, + input: tx.input, + }; + (alloy_tx, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/trie.rs b/crates/storage/codecs/src/alloy/trie.rs index 73e82af63a..c89ef0bf6e 100644 --- a/crates/storage/codecs/src/alloy/trie.rs +++ b/crates/storage/codecs/src/alloy/trie.rs @@ -1,13 +1,11 @@ //! Native Compact codec impl for EIP-7685 requests.
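All of these transaction codecs follow the same mirror-struct pattern: a private `Compact`-derived type that is field-for-field identical to the alloy type, with encoding delegated through it in both directions. A minimal sketch of the round-trip property this buys, using the `Compact` signatures shown above (the helper name is ours):

```rust
use reth_codecs::Compact;

/// Encode a value through its Compact mirror struct and decode it back.
fn assert_roundtrip<T: Compact + PartialEq + std::fmt::Debug>(value: T) {
    let mut buf = Vec::new();
    let len = value.to_compact(&mut buf);
    let (decoded, _) = T::from_compact(&buf, len);
    assert_eq!(decoded, value);
}
```

The `#[add_arbitrary_tests(compact)]` attribute on each mirror struct generates essentially this check over arbitrary inputs.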
use crate::Compact; +use alloc::vec::Vec; use alloy_primitives::B256; use alloy_trie::{hash_builder::HashBuilderValue, BranchNodeCompact, TrieMask}; use bytes::{Buf, BufMut}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - /// Identifier for [`HashBuilderValue::Hash`] const HASH_BUILDER_TYPE_HASH: u8 = 0; diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 4419dbee2a..a3d8070e6e 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -22,9 +22,7 @@ pub use reth_codecs_derive::*; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; -#[cfg(not(feature = "std"))] extern crate alloc; -#[cfg(not(feature = "std"))] use alloc::vec::Vec; #[cfg(any(test, feature = "alloy"))] @@ -489,7 +487,7 @@ mod tests { fn compact_bytes() { let arr = [1, 2, 3, 4, 5]; let list = Bytes::copy_from_slice(&arr); - let mut buf = vec![]; + let mut buf = Vec::with_capacity(list.len() + 1); assert_eq!(list.to_compact(&mut buf), list.len()); // Add some noise data. @@ -514,7 +512,7 @@ mod tests { #[test] fn compact_b256() { - let mut buf = vec![]; + let mut buf = Vec::with_capacity(32 + 1); assert_eq!(B256::ZERO.to_compact(&mut buf), 32); assert_eq!(buf, vec![0; 32]); @@ -547,7 +545,7 @@ mod tests { #[test] fn compact_option() { let opt = Some(B256::ZERO); - let mut buf = vec![]; + let mut buf = Vec::with_capacity(1 + 32); assert_eq!(None::.to_compact(&mut buf), 0); assert_eq!(opt.to_compact(&mut buf), 1); @@ -558,7 +556,7 @@ mod tests { // If `None`, it returns the slice at the same cursor position. assert_eq!(Option::::from_compact(&buf, 0), (None, buf.as_slice())); - let mut buf = vec![]; + let mut buf = Vec::with_capacity(32); assert_eq!(opt.specialized_to_compact(&mut buf), 1); assert_eq!(buf.len(), 32); assert_eq!(Option::::specialized_from_compact(&buf, 1), (opt, vec![].as_slice())); @@ -607,7 +605,7 @@ mod tests { assert_eq!(buf, vec![2u8]); assert_eq!(u64::from_compact(&buf, 1), (2u64, vec![].as_slice())); - let mut buf = vec![]; + let mut buf = Vec::with_capacity(8); assert_eq!(0xffffffffffffffffu64.to_compact(&mut buf), 8); assert_eq!(&buf, &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]); @@ -685,18 +683,16 @@ mod tests { #[test] fn compact_test_struct() { let test = TestStruct::default(); - let mut buf = vec![]; - assert_eq!( - test.to_compact(&mut buf), - 2 + // TestStructFlags + const EXPECTED_SIZE: usize = 2 + // TestStructFlags 1 + 1 + // 0 + 0 + 0 + 32 + 1 + 2 + 1 + - 1 + 20 * 2 - ); + 1 + 20 * 2; + let mut buf = Vec::with_capacity(EXPECTED_SIZE); + assert_eq!(test.to_compact(&mut buf), EXPECTED_SIZE); assert_eq!( TestStruct::from_compact(&buf, buf.len()), diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 6163aa4351..c6561360d3 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -22,6 +22,9 @@ reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true +# ethereum +alloy-primitives.workspace = true + # codecs modular-bitfield.workspace = true parity-scale-codec = { version = "3.2.1", features = ["bytes"] } @@ -48,21 +51,10 @@ rand.workspace = true test-fuzz.workspace = true -pprof = { workspace = true, features = [ - "flamegraph", - "frame-pointer", - "criterion", -] } -criterion.workspace = true -iai-callgrind.workspace = true - arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true - 
-assert_matches.workspace = true - [features] test-utils = ["arbitrary"] arbitrary = [ diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index bada512831..134819a8e2 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -132,10 +132,6 @@ pub trait DbDupCursorRW { } /// Provides an iterator to `Cursor` when handling `Table`. -/// -/// Reason why we have two lifetimes is to distinguish between `'cursor` lifetime -/// and inherited `'tx` lifetime. If there is only one, rust would short circle -/// the Cursor lifetime and it wouldn't be possible to use Walker. pub struct Walker<'cursor, T: Table, CURSOR: DbCursorRO> { /// Cursor to be used to walk through the table. cursor: &'cursor mut CURSOR, diff --git a/crates/storage/db-api/src/database.rs b/crates/storage/db-api/src/database.rs index 22c61284b4..df7c3a5678 100644 --- a/crates/storage/db-api/src/database.rs +++ b/crates/storage/db-api/src/database.rs @@ -8,7 +8,7 @@ use std::{fmt::Debug, sync::Arc}; /// Main Database trait that can open read-only and read-write transactions. /// /// Sealed trait which cannot be implemented by 3rd parties, exposed only for consumption. -pub trait Database: Send + Sync { +pub trait Database: Send + Sync + Debug { /// Read-Only database transaction type TX: DbTx + Send + Sync + Debug + 'static; /// Read-Write database transaction diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs index b71864c2f8..338a3a06f6 100644 --- a/crates/storage/db-api/src/models/accounts.rs +++ b/crates/storage/db-api/src/models/accounts.rs @@ -7,7 +7,7 @@ use crate::{ table::{Decode, Encode}, DatabaseError, }; -use reth_primitives::{Address, BlockNumber, StorageKey}; +use alloy_primitives::{Address, BlockNumber, StorageKey}; use serde::{Deserialize, Serialize}; /// [`BlockNumber`] concatenated with [`Address`]. diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 052254be78..b48baf6d6b 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -1,7 +1,8 @@ //! Block related models and types. +use alloy_primitives::B256; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{Header, Withdrawals, B256}; +use reth_primitives::Header; use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. @@ -15,15 +16,6 @@ pub struct StoredBlockOmmers { pub ommers: Vec
, } -/// The storage representation of block withdrawals. -#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -pub struct StoredBlockWithdrawals { - /// The block withdrawals. - pub withdrawals: Withdrawals, -} - /// Hash of the block header. pub type HeaderHash = B256; diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 743bbe6bad..b8c1a6f66c 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -4,9 +4,12 @@ use crate::{ table::{Compress, Decode, Decompress, Encode}, DatabaseError, }; - +use alloy_primitives::{Address, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{parlia::Snapshot, Address, B256, *}; +use reth_primitives::{ + parlia::Snapshot, Account, BlobSidecar, BlobSidecars, BufMut, Bytecode, GenesisAccount, Header, + Receipt, Requests, SealedHeader, StorageEntry, TransactionSignedNoHash, TxType, +}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -14,15 +17,15 @@ use serde::{Deserialize, Serialize}; pub mod accounts; pub mod blocks; -pub mod client_version; pub mod integer_list; pub mod sharded_key; pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; -pub use client_version::ClientVersion; -pub use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; +pub use reth_db_models::{ + AccountBeforeTx, ClientVersion, StoredBlockBodyIndices, StoredBlockWithdrawals, +}; pub use sharded_key::ShardedKey; /// Macro that implements [`Encode`] and [`Decode`] for uint types. 
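The macro referenced here gives integer table keys a fixed-width big-endian encoding. That choice matters because MDBX orders keys bytewise: big-endian integers sort in numeric order, so cursor walks over e.g. block-number ranges come back in the expected order. A hand-rolled equivalent for `u64` (illustrative, not the generated code):

```rust
// Big-endian keeps bytewise ordering identical to numeric ordering,
// which MDBX cursors rely on when walking key ranges.
fn encode_key(n: u64) -> [u8; 8] {
    n.to_be_bytes()
}

fn decode_key(bytes: &[u8]) -> u64 {
    u64::from_be_bytes(bytes.try_into().expect("key must be 8 bytes"))
}

fn main() {
    assert!(encode_key(255) < encode_key(256)); // ordering preserved
    assert_eq!(decode_key(&encode_key(42)), 42);
}
```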
@@ -326,8 +329,7 @@ mod tests { use rand::Rng; use reth_primitives::{ parlia::{ValidatorInfo, VoteAddress, VoteData, DEFAULT_TURN_LENGTH}, - Account, Header, Receipt, ReceiptWithBloom, SealedHeader, TxEip1559, TxEip2930, TxEip4844, - TxLegacy, Withdrawals, + Account, Header, Receipt, ReceiptWithBloom, SealedHeader, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ @@ -368,10 +370,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); } @@ -401,10 +399,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); } diff --git a/crates/storage/db-api/src/models/sharded_key.rs b/crates/storage/db-api/src/models/sharded_key.rs index feba230a60..dd8702a481 100644 --- a/crates/storage/db-api/src/models/sharded_key.rs +++ b/crates/storage/db-api/src/models/sharded_key.rs @@ -3,7 +3,7 @@ use crate::{ table::{Decode, Encode}, DatabaseError, }; -use reth_primitives::BlockNumber; +use alloy_primitives::BlockNumber; use serde::{Deserialize, Serialize}; use std::hash::Hash; diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 04243808f7..b6538256e6 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -3,8 +3,8 @@ use crate::{ table::{Decode, Encode}, DatabaseError, }; +use alloy_primitives::{Address, BlockNumber, B256}; use derive_more::AsRef; -use reth_primitives::{Address, BlockNumber, B256}; use serde::{Deserialize, Serialize}; use super::ShardedKey; diff --git a/crates/storage/db-api/src/scale.rs b/crates/storage/db-api/src/scale.rs index a837dadef9..99382a4a91 100644 --- a/crates/storage/db-api/src/scale.rs +++ b/crates/storage/db-api/src/scale.rs @@ -2,7 +2,7 @@ use crate::{ table::{Compress, Decompress}, DatabaseError, }; -use reth_primitives::*; +use alloy_primitives::U256; mod sealed { pub trait Sealed {} diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 5c453df1cf..7fc4879698 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -21,9 +21,11 @@ reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true reth-fs-util.workspace = true +reth-node-types.workspace = true # eth alloy-genesis.workspace = true +alloy-primitives.workspace = true # misc eyre.workspace = true @@ -39,6 +41,7 @@ tracing.workspace = true [dev-dependencies] reth-primitives-traits.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 
3884089b43..483e6c301e 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -12,28 +12,21 @@ use reth_db_api::{ DatabaseError, }; use reth_fs_util as fs; +use reth_node_types::NodeTypesWithDB; use reth_provider::{ChainSpecProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; /// Wrapper over DB that implements many useful DB queries. #[derive(Debug)] -pub struct DbTool { +pub struct DbTool { /// The provider factory that the db tool will use. - pub provider_factory: ProviderFactory, + pub provider_factory: ProviderFactory, } -impl DbTool { - /// Takes a DB where the tables have already been created. - pub fn new(provider_factory: ProviderFactory) -> eyre::Result { - // Disable timeout because we are entering a TUI which might read for a long time. We - // disable on the [`DbTool`] level since it's only used in the CLI. - provider_factory.provider()?.disable_long_read_transaction_safety(); - Ok(Self { provider_factory }) - } - +impl DbTool { /// Get an [`Arc`] to the [`ChainSpec`]. - pub fn chain(&self) -> Arc { + pub fn chain(&self) -> Arc { self.provider_factory.chain_spec() } @@ -115,6 +108,16 @@ impl DbTool { Ok((data.map_err(|e: DatabaseError| eyre::eyre!(e))?, hits)) } +} + +impl> DbTool { + /// Takes a DB where the tables have already been created. + pub fn new(provider_factory: ProviderFactory) -> eyre::Result { + // Disable timeout because we are entering a TUI which might read for a long time. We + // disable on the [`DbTool`] level since it's only used in the CLI. + provider_factory.provider()?.disable_long_read_transaction_safety(); + Ok(Self { provider_factory }) + } /// Grabs the content of the table for the given key pub fn get(&self, key: T::Key) -> Result> { diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index bb474bb7ea..d7c06aaa61 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -1,33 +1,28 @@ //! Reth genesis initialization utility functions. 
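The `DbTool` hunk above is representative of the PR's wider generics migration: instead of threading a bare `DB: Database` parameter through every component, callers now pass a single node-types parameter that bundles the database with the other associated types. Roughly, as an illustrative reduction (not reth's exact trait definition):

```rust
use std::fmt::Debug;
use std::marker::PhantomData;

// Stand-in for the real sealed trait.
trait Database: Send + Sync + Debug {}

// The node-types bundle: one generic parameter carrying what used to be
// several (database, chain spec, ...).
trait NodeTypesWithDB {
    type DB: Database;
    type ChainSpec;
}

// Before: DbTool<DB: Database>. After: DbTool<N: NodeTypesWithDB>, so
// methods can name N::DB and N::ChainSpec without extra parameters.
struct DbTool<N: NodeTypesWithDB> {
    _node_types: PhantomData<N>,
}
```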
use alloy_genesis::GenesisAccount; +use alloy_primitives::{Address, B256, U256}; use reth_chainspec::ChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; use reth_db::tables; -use reth_db_api::{database::Database, transaction::DbTxMut, DatabaseError}; +use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; -use reth_primitives::{ - Account, Address, Bytecode, Receipts, StaticFileSegment, StorageEntry, B256, U256, -}; +use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; use reth_provider::{ errors::provider::ProviderResult, providers::{StaticFileProvider, StaticFileWriter}, writer::UnifiedStorageWriter, - BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DatabaseProviderRW, - ExecutionOutcome, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, RevertsInit, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, + DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, + StateWriter, StaticFileProviderFactory, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use reth_trie_db::DatabaseStateRoot; use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeMap, HashMap}, - io::BufRead, - sync::Arc, -}; +use std::{collections::HashMap, io::BufRead}; use tracing::{debug, error, info, trace}; /// Default soft limit for number of bytes to read from state dump file, before inserting into @@ -62,16 +57,9 @@ pub enum InitDatabaseError { /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), - /// Computed state root doesn't match state root in state dump file. - #[error( - "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" - )] - StateRootMismatch { - /// Expected state root. - expected_state_root: B256, - /// Actual state root. - computed_state_root: B256, - }, + /// State root doesn't match the expected one. + #[error("state root mismatch: {_0}")] + StateRootMismatch(GotExpected), } impl From for InitDatabaseError { @@ -81,7 +69,19 @@ impl From for InitDatabaseError { } /// Write the genesis block if it has not already been written -pub fn init_genesis(factory: ProviderFactory) -> Result { +pub fn init_genesis(factory: &PF) -> Result +where + PF: DatabaseProviderFactory + + StaticFileProviderFactory + + ChainSpecProvider + + BlockHashReader, + PF::ProviderRW: StageCheckpointWriter + + HistoryWriter + + HeaderProvider + + HashingWriter + + StateChangeWriter + + AsRef, +{ let chain = factory.chain_spec(); let genesis = chain.genesis(); @@ -109,15 +109,15 @@ pub fn init_genesis(factory: ProviderFactory) -> Result(factory: ProviderFactory) -> Result( - provider: &DatabaseProviderRW, - capacity: usize, +pub fn insert_genesis_state<'a, 'b, Provider>( + provider: &Provider, alloc: impl Iterator, -) -> ProviderResult<()> { - insert_state::(provider, capacity, alloc, 0) +) -> ProviderResult<()> +where + Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, +{ + insert_state(provider, alloc, 0) } /// Inserts state at given block into database. 
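`StateRootMismatch` above now wraps `GotExpected` instead of carrying two named fields, so the mismatch formatting lives in one shared type. A minimal sketch of the shape the error relies on (reth's actual type lives in `reth-primitives`; this mirrors only what is used here):

```rust
use core::fmt;

// Minimal got/expected pair with a Display impl suitable for
// "state root mismatch: {_0}" style error messages.
#[derive(Debug, PartialEq, Eq)]
struct GotExpected<T> {
    got: T,
    expected: T,
}

impl<T: fmt::Display> fmt::Display for GotExpected<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "got {}, expected {}", self.got, self.expected)
    }
}
```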
-pub fn insert_state<'a, 'b, DB: Database>( - provider: &DatabaseProviderRW, - capacity: usize, +pub fn insert_state<'a, 'b, Provider>( + provider: &Provider, alloc: impl Iterator, block: u64, -) -> ProviderResult<()> { +) -> ProviderResult<()> +where + Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, +{ + let capacity = alloc.size_hint().1.unwrap_or(0); let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); let mut reverts_init = HashMap::with_capacity(capacity); let mut contracts: HashMap = HashMap::with_capacity(capacity); for (address, account) in alloc { let bytecode_hash = if let Some(code) = &account.code { - let bytecode = Bytecode::new_raw(code.clone()); - let hash = bytecode.hash_slow(); - contracts.insert(hash, bytecode); - Some(hash) + match Bytecode::new_raw_checked(code.clone()) { + Ok(bytecode) => { + let hash = bytecode.hash_slow(); + contracts.insert(hash, bytecode); + Some(hash) + } + Err(err) => { + error!(%address, %err, "Failed to decode genesis bytecode."); + return Err(DatabaseError::Other(err.to_string()).into()); + } + } } else { None }; @@ -205,13 +217,13 @@ pub fn insert_state<'a, 'b, DB: Database>( let execution_outcome = ExecutionOutcome::new_init( state_init, all_reverts_init, - contracts.into_iter().collect(), + contracts, Receipts::default(), block, Vec::new(), ); - let mut storage_writer = UnifiedStorageWriter::from_database(provider); + let mut storage_writer = UnifiedStorageWriter::from_database(&provider); storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; trace!(target: "reth::cli", "Inserted state"); @@ -220,10 +232,13 @@ pub fn insert_state<'a, 'b, DB: Database>( } /// Inserts hashes for the genesis state. -pub fn insert_genesis_hashes<'a, 'b, DB: Database>( - provider: &DatabaseProviderRW, +pub fn insert_genesis_hashes<'a, 'b, Provider>( + provider: &Provider, alloc: impl Iterator + Clone, -) -> ProviderResult<()> { +) -> ProviderResult<()> +where + Provider: DBProvider + HashingWriter, +{ // insert and hash accounts to hashing table let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account)))); provider.insert_account_for_hashing(alloc_accounts)?; @@ -233,13 +248,7 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( let alloc_storage = alloc.filter_map(|(addr, account)| { // only return Some if there is storage account.storage.as_ref().map(|storage| { - ( - *addr, - storage - .clone() - .into_iter() - .map(|(key, value)| StorageEntry { key, value: value.into() }), - ) + (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() })) }) }); provider.insert_storage_for_hashing(alloc_storage)?; @@ -250,29 +259,33 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( } /// Inserts history indices for genesis accounts and storage. -pub fn insert_genesis_history<'a, 'b, DB: Database>( - provider: &DatabaseProviderRW, +pub fn insert_genesis_history<'a, 'b, Provider>( + provider: &Provider, alloc: impl Iterator + Clone, -) -> ProviderResult<()> { - insert_history::(provider, alloc, 0) +) -> ProviderResult<()> +where + Provider: DBProvider + HistoryWriter, +{ + insert_history(provider, alloc, 0) } /// Inserts history indices for genesis accounts and storage. 
-pub fn insert_history<'a, 'b, DB: Database>( - provider: &DatabaseProviderRW, +pub fn insert_history<'a, 'b, Provider>( + provider: &Provider, alloc: impl Iterator + Clone, block: u64, -) -> ProviderResult<()> { - let account_transitions = - alloc.clone().map(|(addr, _)| (*addr, vec![block])).collect::>(); +) -> ProviderResult<()> +where + Provider: DBProvider + HistoryWriter, +{ + let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block])); provider.insert_account_history_index(account_transitions)?; trace!(target: "reth::cli", "Inserted account history"); let storage_transitions = alloc .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) - .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![block]))) - .collect::>(); + .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), [block]))); provider.insert_storage_history_index(storage_transitions)?; trace!(target: "reth::cli", "Inserted storage history"); @@ -281,18 +294,21 @@ pub fn insert_history<'a, 'b, DB: Database>( } /// Inserts header for the genesis state. -pub fn insert_genesis_header( - provider: &DatabaseProviderRW, +pub fn insert_genesis_header( + provider: &Provider, static_file_provider: &StaticFileProvider, - chain: Arc, -) -> ProviderResult<()> { - let (header, block_hash) = chain.sealed_genesis_header().split(); + chain: &ChainSpec, +) -> ProviderResult<()> +where + Provider: DBProvider, +{ + let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { let (difficulty, hash) = (header.difficulty, block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(&header, difficulty, &hash)?; + writer.append_header(header, difficulty, &hash)?; // skip the zero block index let mut writer = static_file_provider.latest_writer(StaticFileSegment::Sidecars)?; @@ -314,32 +330,60 @@ pub fn insert_genesis_header( /// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can /// be set to the highest block present. One practical usecase is to import OP mainnet state at /// bedrock transition block. -pub fn init_from_state_dump( +pub fn init_from_state_dump( mut reader: impl BufRead, - factory: ProviderFactory, + provider_rw: &Provider, etl_config: EtlConfig, -) -> eyre::Result { - let block = factory.last_block_number()?; - let hash = factory.block_hash(block)?.unwrap(); +) -> eyre::Result +where + Provider: DBProvider + + BlockNumReader + + BlockHashReader + + ChainSpecProvider + + StageCheckpointWriter + + HistoryWriter + + HeaderProvider + + HashingWriter + + StateChangeWriter + + TrieWriter + + AsRef, +{ + let block = provider_rw.last_block_number()?; + let hash = provider_rw.block_hash(block)?.unwrap(); + let expected_state_root = provider_rw + .header_by_number(block)? + .ok_or(ProviderError::HeaderNotFound(block.into()))? + .state_root; + + // first line can be state root + let dump_state_root = parse_state_root(&mut reader)?; + if expected_state_root != dump_state_root { + error!(target: "reth::cli", + ?dump_state_root, + ?expected_state_root, + "State root from state dump does not match state root in current header." 
+ ); + return Err(InitDatabaseError::StateRootMismatch(GotExpected { + got: dump_state_root, + expected: expected_state_root, + }) + .into()) + } debug!(target: "reth::cli", block, - chain=%factory.chain_spec().chain, + chain=%provider_rw.chain_spec().chain, "Initializing state at block" ); - // first line can be state root, then it can be used for verifying against computed state root - let expected_state_root = parse_state_root(&mut reader)?; - // remaining lines are accounts let collector = parse_accounts(&mut reader, etl_config)?; // write state to db - let provider_rw = factory.provider_rw()?; - dump_state(collector, &provider_rw, block)?; + dump_state(collector, provider_rw, block)?; // compute and compare state root. this advances the stage checkpoints. - let computed_state_root = compute_state_root(&provider_rw)?; + let computed_state_root = compute_state_root(provider_rw)?; if computed_state_root == expected_state_root { info!(target: "reth::cli", ?computed_state_root, @@ -352,7 +396,11 @@ pub fn init_from_state_dump( "Computed state root does not match state root in state dump" ); - Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })? + return Err(InitDatabaseError::StateRootMismatch(GotExpected { + got: computed_state_root, + expected: expected_state_root, + }) + .into()) } // insert sync stages for stages that require state @@ -360,8 +408,6 @@ pub fn init_from_state_dump( provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?; } - provider_rw.commit()?; - Ok(hash) } @@ -408,11 +454,19 @@ fn parse_accounts( } /// Takes a [`Collector`] and processes all accounts. -fn dump_state( +fn dump_state( mut collector: Collector, - provider_rw: &DatabaseProviderRW, + provider_rw: &Provider, block: u64, -) -> Result<(), eyre::Error> { +) -> Result<(), eyre::Error> +where + Provider: DBProvider + + HeaderProvider + + HashingWriter + + HistoryWriter + + StateChangeWriter + + AsRef, +{ let accounts_len = collector.len(); let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); let mut total_inserted_accounts = 0; @@ -447,9 +501,8 @@ fn dump_state( )?; // block is already written to static files - insert_state::( + insert_state( provider_rw, - accounts.len(), accounts.iter().map(|(address, account)| (address, account)), block, )?; @@ -462,7 +515,10 @@ fn dump_state( /// Computes the state root (from scratch) based on the accounts and storages present in the /// database. 
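Note the ordering change in `init_from_state_dump` above: the expected root is now read from the dump's first line and compared against the current header's root before any accounts are collected or written, so a wrong dump fails immediately instead of after a full ETL pass and state-root computation. A sketch of the first-line parse, assuming the root is stored as a plain hex line (the real `parse_state_root` may expect a different encoding):

```rust
use alloy_primitives::B256;
use std::io::BufRead;

// Read the dump's first line and parse it as a 32-byte hex state root.
fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
    let mut line = String::new();
    reader.read_line(&mut line)?;
    Ok(line.trim().parse()?)
}
```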
-fn compute_state_root(provider: &DatabaseProviderRW) -> eyre::Result { +fn compute_state_root(provider: &Provider) -> eyre::Result +where + Provider: DBProvider + TrieWriter, +{ trace!(target: "reth::cli", "Computing state root"); let tx = provider.tx_ref(); @@ -539,10 +595,15 @@ mod tests { models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::{Table, TableRow}, transaction::DbTx, + Database, }; use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use reth_primitives_traits::IntegerList; - use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; + use reth_provider::{ + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ProviderFactory, + }; + use std::{collections::BTreeMap, sync::Arc}; fn collect_table_entries( tx: &::TX, @@ -557,7 +618,7 @@ mod tests { #[test] fn success_init_genesis_mainnet() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap(); + init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap(); // actual, expected assert_eq!(genesis_hash, MAINNET_GENESIS_HASH); @@ -566,7 +627,7 @@ mod tests { #[test] fn success_init_genesis_sepolia() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); + init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); // actual, expected assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH); @@ -575,7 +636,7 @@ mod tests { #[test] fn success_init_genesis_holesky() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap(); + init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap(); // actual, expected assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH); @@ -585,10 +646,10 @@ mod tests { fn fail_init_inconsistent_db() { let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone()); let static_file_provider = factory.static_file_provider(); - init_genesis(factory.clone()).unwrap(); + init_genesis(&factory).unwrap(); // Try to init db with a different genesis block - let genesis_hash = init_genesis(ProviderFactory::new( + let genesis_hash = init_genesis(&ProviderFactory::::new( factory.into_db(), MAINNET.clone(), static_file_provider, @@ -627,14 +688,14 @@ mod tests { ..Default::default() }, hardforks: Default::default(), - genesis_hash: None, + genesis_hash: Default::default(), paris_block_and_final_difficulty: None, deposit_contract: None, ..Default::default() }); let factory = create_test_provider_factory_with_chain_spec(chain_spec); - init_genesis(factory.clone()).unwrap(); + init_genesis(&factory).unwrap(); let provider = factory.provider().unwrap(); diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 6f0ae3ffc0..9bcd54f386 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -16,6 +16,9 @@ workspace = true reth-codecs.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } +# ethereum +alloy-primitives.workspace = true + # codecs modular-bitfield.workspace = true serde = { workspace = true, default-features = false } diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index bc84ea0fcc..74736247a6 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -1,7 +1,8 @@ use 
reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; -use reth_primitives::{Account, Address, Buf}; +use alloy_primitives::Address; +use reth_primitives::{Account, Buf}; /// Account as it is saved in the database. /// diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index c993e58dad..3e740a2e1a 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -1,7 +1,8 @@ use std::ops::Range; +use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::TxNumber; +use reth_primitives::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. @@ -66,6 +67,15 @@ impl StoredBlockBodyIndices { } } +/// The storage representation of block withdrawals. +#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub struct StoredBlockWithdrawals { + /// The block withdrawals. + pub withdrawals: Withdrawals, +} + #[cfg(test)] mod tests { use crate::StoredBlockBodyIndices; diff --git a/crates/storage/db-api/src/models/client_version.rs b/crates/storage/db-models/src/client_version.rs similarity index 100% rename from crates/storage/db-api/src/models/client_version.rs rename to crates/storage/db-models/src/client_version.rs diff --git a/crates/storage/db-models/src/lib.rs b/crates/storage/db-models/src/lib.rs index 55a210269c..b8595362af 100644 --- a/crates/storage/db-models/src/lib.rs +++ b/crates/storage/db-models/src/lib.rs @@ -6,4 +6,8 @@ pub use accounts::AccountBeforeTx; /// Blocks pub mod blocks; -pub use blocks::StoredBlockBodyIndices; +pub use blocks::{StoredBlockBodyIndices, StoredBlockWithdrawals}; + +/// Client Version +pub mod client_version; +pub use client_version::ClientVersion; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 97046e54bb..09ae5efd43 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -24,6 +24,9 @@ reth-stages-types.workspace = true reth-tracing.workspace = true reth-trie-common.workspace = true +# ethereum +alloy-primitives.workspace = true + # mdbx reth-libmdbx = { workspace = true, optional = true, features = [ "return-borrowed", diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 7856627e16..97154d483e 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -1,3 +1,4 @@ +use alloy_primitives::Bytes; use reth_db::{test_utils::create_test_rw_db_with_path, DatabaseEnv}; use reth_db_api::{ database::Database, @@ -5,7 +6,6 @@ use reth_db_api::{ transaction::DbTxMut, }; use reth_fs_util as fs; -use reth_primitives::Bytes; use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. 
diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index cebbf9407e..2c26375eba 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -13,7 +13,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics}, - models::client_version::ClientVersion, + models::ClientVersion, transaction::{DbTx, DbTxMut}, }; use reth_libmdbx::{ @@ -476,13 +476,14 @@ mod tests { test_utils::*, AccountChangeSets, }; + use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, models::{AccountBeforeTx, ShardedKey}, table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Address, Header, StorageEntry, B256, U256}; + use reth_primitives::{Account, Header, StorageEntry}; use reth_primitives_traits::IntegerList; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c16f2b73c4..a9f073d7b5 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -36,6 +36,7 @@ pub use utils::is_database_empty; #[cfg(feature = "mdbx")] pub use mdbx::{create_db, init_db, open_db, open_db_read_only, DatabaseEnv, DatabaseEnvKind}; +pub use models::ClientVersion; pub use reth_db_api::*; /// Collection of database test utilities diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index 4a052c6abf..f14e023087 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -1,8 +1,9 @@ use super::mask::{ColumnSelectorOne, ColumnSelectorThree, ColumnSelectorTwo}; +use alloy_primitives::B256; use derive_more::{Deref, DerefMut}; use reth_db_api::table::Decompress; use reth_nippy_jar::{DataReader, NippyJar, NippyJarCursor}; -use reth_primitives::{static_file::SegmentHeader, B256}; +use reth_primitives::static_file::SegmentHeader; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::sync::Arc; @@ -39,7 +40,7 @@ impl<'a> StaticFileCursor<'a> { } let row = match key_or_num { - KeyOrNumber::Key(k) => self.row_by_key_with_cols(k, mask), + KeyOrNumber::Key(_) => unimplemented!(), KeyOrNumber::Number(n) => match self.jar().user_header().start() { Some(offset) => { if offset > n { diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 40b5406e01..9701fa37ca 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -4,8 +4,9 @@ use crate::{ static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, HeaderTerminalDifficulties, RawValue, Receipts, Transactions, }; +use alloy_primitives::BlockHash; use reth_db_api::table::Table; -use reth_primitives::{BlockHash, Header}; +use reth_primitives::Header; use reth_primitives_traits::BlobSidecars; // HEADER MASKS diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index 1d038bf7e6..9cb4caed35 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -17,7 +17,7 @@ macro_rules! 
impl_fuzzer_with_input { use reth_db_api::table; #[allow(unused_imports)] - use reth_primitives::*; + #[allow(unused_imports)] use reth_primitives_traits::*; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 4f3fc7acaf..da537a1014 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -19,19 +19,20 @@ pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; #[cfg(feature = "mdbx")] pub(crate) mod utils; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_db_api::{ models::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, - client_version::ClientVersion, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, CompactU256, ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, CompactU256, ShardedKey, StoredBlockBodyIndices, + StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table}, }; use reth_primitives::{ - parlia::Snapshot, Account, Address, BlockHash, BlockNumber, Bytecode, Header, Receipt, - Requests, StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, B256, + parlia::Snapshot, Account, Bytecode, Header, Receipt, Requests, StorageEntry, + TransactionSignedNoHash, }; use reth_primitives_traits::{BlobSidecars, IntegerList}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index c93adfbe19..cbea350360 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -11,10 +11,14 @@ repository.workspace = true workspace = true [dependencies] -alloy-rlp.workspace = true +# reth reth-primitives.workspace = true reth-fs-util.workspace = true +# alloy +alloy-primitives.workspace = true +alloy-rlp.workspace = true + # misc derive_more.workspace = true diff --git a/crates/storage/errors/src/db.rs b/crates/storage/errors/src/db.rs index 079a7d56fd..f27dacdc30 100644 --- a/crates/storage/errors/src/db.rs +++ b/crates/storage/errors/src/db.rs @@ -1,14 +1,9 @@ -#[cfg(feature = "std")] -use std::{fmt, fmt::Display, str::FromStr, string::String}; - -#[cfg(not(feature = "std"))] use alloc::{ boxed::Box, format, string::{String, ToString}, vec::Vec, }; -#[cfg(not(feature = "std"))] use core::{ fmt, fmt::{Debug, Display}, @@ -111,7 +106,7 @@ impl fmt::Display for DatabaseWriteError { f, "write operation {:?} failed for key \"{}\" in table {}: {}", self.operation, - reth_primitives::hex::encode(&self.key), + alloy_primitives::hex::encode(&self.key), self.table_name, self.info ) diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index cf1a1a9762..6abb0cd9b4 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] extern crate alloc; /// Database error diff --git a/crates/storage/errors/src/lockfile.rs b/crates/storage/errors/src/lockfile.rs index 17687cb69c..667197f571 100644 --- a/crates/storage/errors/src/lockfile.rs +++ b/crates/storage/errors/src/lockfile.rs @@ -1,7 +1,5 @@ -use reth_fs_util::FsPathError; - -#[cfg(not(feature = "std"))] use alloc::string::{String, ToString}; +use reth_fs_util::FsPathError; /// Storage lock error. 
#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 16ff50f334..e478ca2ebe 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,14 +1,11 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256, U256}; use derive_more::Display; -use reth_primitives::{ - Address, BlockHash, BlockHashOrNumber, BlockNumber, GotExpected, StaticFileSegment, - TxHashOrNumber, TxNumber, B256, U256, -}; +use reth_primitives::{BlockHashOrNumber, GotExpected, StaticFileSegment, TxHashOrNumber}; #[cfg(feature = "std")] use std::path::PathBuf; -#[cfg(not(feature = "std"))] use alloc::{boxed::Box, string::String}; /// Provider result type. diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs index 71a1c47e2a..10d4ad96ed 100644 --- a/crates/storage/errors/src/writer.rs +++ b/crates/storage/errors/src/writer.rs @@ -3,7 +3,7 @@ use reth_primitives::StaticFileSegment; /// `UnifiedStorageWriter` related errors /// `StorageWriter` related errors -#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq)] +#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] pub enum UnifiedStorageWriterError { /// Database writer is missing #[display("Database writer is missing")] @@ -18,16 +18,6 @@ pub enum UnifiedStorageWriterError { Database(DatabaseError), } -#[cfg(feature = "std")] -impl std::error::Error for UnifiedStorageWriterError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::Database(source) => std::error::Error::source(source), - _ => Option::None, - } - } -} - impl From for UnifiedStorageWriterError { fn from(error: DatabaseError) -> Self { Self::Database(error) diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 74a704c0d4..d007cc03e4 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -486,7 +486,11 @@ where K: TransactionKind, { fn drop(&mut self) { - self.txn.txn_execute(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) }).unwrap() + // To be able to close a cursor of a timed out transaction, we need to renew it first. + // Hence the usage of `txn_execute_renew_on_timeout` here. 
+ let _ = self + .txn + .txn_execute_renew_on_timeout(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) }); } } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 1549d42e18..f243c7da18 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -41,6 +41,8 @@ impl Environment { flags: EnvironmentFlags::default(), max_readers: None, max_dbs: None, + sync_bytes: None, + sync_period: None, rp_augment_limit: None, loose_limit: None, dp_reserve_limit: None, @@ -566,6 +568,8 @@ pub struct EnvironmentBuilder { flags: EnvironmentFlags, max_readers: Option, max_dbs: Option, + sync_bytes: Option, + sync_period: Option, rp_augment_limit: Option, loose_limit: Option, dp_reserve_limit: Option, @@ -639,6 +643,8 @@ impl EnvironmentBuilder { } for (opt, v) in [ (ffi::MDBX_opt_max_db, self.max_dbs), + (ffi::MDBX_opt_sync_bytes, self.sync_bytes), + (ffi::MDBX_opt_sync_period, self.sync_period), (ffi::MDBX_opt_rp_augment_limit, self.rp_augment_limit), (ffi::MDBX_opt_loose_limit, self.loose_limit), (ffi::MDBX_opt_dp_reserve_limit, self.dp_reserve_limit), @@ -767,6 +773,23 @@ impl EnvironmentBuilder { self } + /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if + /// [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is used. + pub fn set_sync_bytes(&mut self, v: usize) -> &mut Self { + self.sync_bytes = Some(v as u64); + self + } + + /// Sets the interprocess/shared relative period since the last unsteady commit to force flush + /// the data buffers to disk, if [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is + /// used. + pub fn set_sync_period(&mut self, v: Duration) -> &mut Self { + // For this option, mdbx uses units of 1/65536 of a second. + let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64; + self.sync_period = Some(as_mdbx_units); + self + } + pub fn set_rp_augment_limit(&mut self, v: u64) -> &mut Self { self.rp_augment_limit = Some(v); self diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 37af501c18..88236ebe99 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -112,6 +112,18 @@ where self.inner.txn_execute(f) } + /// Executes the given closure once the lock on the transaction is acquired. If the transaction + /// is timed out, it will be renewed first. + /// + /// Returns the result of the closure or an error if the transaction renewal fails. + #[inline] + pub(crate) fn txn_execute_renew_on_timeout(&self, f: F) -> Result + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { + self.inner.txn_execute_renew_on_timeout(f) + } + /// Returns a copy of the raw pointer to the underlying MDBX transaction. #[doc(hidden)] #[cfg(test)] @@ -321,6 +333,14 @@ where { self.txn.txn_execute_fail_on_timeout(f) } + + #[inline] + fn txn_execute_renew_on_timeout(&self, f: F) -> Result + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { + self.txn.txn_execute_renew_on_timeout(f) + } } impl Drop for TransactionInner @@ -596,7 +616,7 @@ impl TransactionPtr { /// /// Returns the result of the closure or an error if the transaction renewal fails. 
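The new `set_sync_period` above converts from `Duration` because libmdbx's `MDBX_opt_sync_period` option is specified in units of 1/65536 of a second. A quick standalone check of the conversion, mirroring the builder's arithmetic:

```rust
use std::time::Duration;

// Same arithmetic as EnvironmentBuilder::set_sync_period: a Duration
// expressed in libmdbx's 1/65536-second ticks.
fn to_mdbx_ticks(period: Duration) -> u64 {
    (period.as_secs_f64() * 65536f64) as u64
}

fn main() {
    assert_eq!(to_mdbx_ticks(Duration::from_secs(1)), 65536);
    assert_eq!(to_mdbx_ticks(Duration::from_millis(500)), 32768);
}
```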
#[inline] - fn txn_execute_renew_on_timeout<F, T>(&self, f: F) -> Result<T> + pub(crate) fn txn_execute_renew_on_timeout<F, T>(&self, f: F) -> Result<T> where F: FnOnce(*mut ffi::MDBX_txn) -> T, { diff --git a/crates/storage/libmdbx-rs/tests/cursor.rs b/crates/storage/libmdbx-rs/tests/cursor.rs index 306b87c0de..0e02eafd9a 100644 --- a/crates/storage/libmdbx-rs/tests/cursor.rs +++ b/crates/storage/libmdbx-rs/tests/cursor.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] use reth_libmdbx::*; use std::borrow::Cow; use tempfile::tempdir; diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 85cf9a62a5..99453ef113 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] use byteorder::{ByteOrder, LittleEndian}; use reth_libmdbx::*; use tempfile::tempdir; diff --git a/crates/storage/libmdbx-rs/tests/transaction.rs b/crates/storage/libmdbx-rs/tests/transaction.rs index 8cdd2531c5..c7e8e3fcd3 100644 --- a/crates/storage/libmdbx-rs/tests/transaction.rs +++ b/crates/storage/libmdbx-rs/tests/transaction.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] use reth_libmdbx::*; use std::{ borrow::Cow, diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 0bc3e40dc2..7c391483a7 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -18,20 +18,10 @@ name = "reth_nippy_jar" # reth reth-fs-util.workspace = true -# filter -ph = "0.8.0" -cuckoofilter = { version = "0.5.0", features = [ - "serde_support", - "serde_bytes", -] } - # compression zstd = { workspace = true, features = ["experimental", "zdict_builder"] } lz4_flex = { version = "0.11", default-features = false } -# offsets -sucds = "~0.8" - memmap2 = "0.9.4" bincode = "1.3" serde = { workspace = true, features = ["derive"] } diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs new file mode 100644 index 0000000000..1093fb5546 --- /dev/null +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -0,0 +1,197 @@ +use crate::{writer::OFFSET_SIZE_BYTES, NippyJar, NippyJarError, NippyJarHeader}; +use std::{ + cmp::Ordering, + fs::{File, OpenOptions}, + io::{BufWriter, Seek, SeekFrom}, + path::Path, +}; + +/// Performs consistency checks or heals on the [`NippyJar`] file +/// * Is the offsets file size expected? +/// * Is the data file size expected? +/// +/// This is based on the assumption that the [`NippyJar`] configuration is **always** the last one +/// to be updated when something is written, as the `NippyJarWriter::commit()` function shows. +/// +/// **For checks (read-only) use the `check_consistency` method.** +/// +/// **For heals (read-write) use the `ensure_consistency` method.** +#[derive(Debug)] +pub struct NippyJarChecker<H: NippyJarHeader = ()> { + /// Associated [`NippyJar`], containing all necessary configurations for data + /// handling. + pub(crate) jar: NippyJar<H>, + /// File handle to where the data is stored. + pub(crate) data_file: Option<BufWriter<File>>, + /// File handle to where the offsets are stored. + pub(crate) offsets_file: Option<BufWriter<File>>, +} + +impl<H: NippyJarHeader> NippyJarChecker<H> { + pub const fn new(jar: NippyJar<H>) -> Self { + Self { jar, data_file: None, offsets_file: None } + } + + /// Throws an error if the [`NippyJar`] is in an inconsistent state.
+    pub fn check_consistency(&mut self) -> Result<(), NippyJarError> {
+        self.handle_consistency(ConsistencyFailStrategy::ThrowError)
+    }
+
+    /// It will attempt to heal if the [`NippyJar`] is in an inconsistent state.
+    ///
+    /// **ATTENTION**: disk commit should be handled externally by consuming `Self`
+    pub fn ensure_consistency(&mut self) -> Result<(), NippyJarError> {
+        self.handle_consistency(ConsistencyFailStrategy::Heal)
+    }
+
+    fn handle_consistency(&mut self, mode: ConsistencyFailStrategy) -> Result<(), NippyJarError> {
+        self.load_files(mode)?;
+        let mut reader = self.jar.open_data_reader()?;
+
+        // When an offset size is smaller than the initial (8), we are dealing with immutable
+        // data.
+        if reader.offset_size() != OFFSET_SIZE_BYTES {
+            return Err(NippyJarError::FrozenJar)
+        }
+
+        let expected_offsets_file_size: u64 = (1 + // first byte is the size of one offset
+            OFFSET_SIZE_BYTES as usize * self.jar.rows * self.jar.columns + // `offset size * num rows * num columns`
+            OFFSET_SIZE_BYTES as usize) as u64; // expected size of the data file
+        let actual_offsets_file_size = self.offsets_file().get_ref().metadata()?.len();
+
+        if mode.should_err() &&
+            expected_offsets_file_size.cmp(&actual_offsets_file_size) != Ordering::Equal
+        {
+            return Err(NippyJarError::InconsistentState)
+        }
+
+        // Offsets configuration wasn't properly committed
+        match expected_offsets_file_size.cmp(&actual_offsets_file_size) {
+            Ordering::Less => {
+                // Happened during an appending job
+                // TODO: ideally we could truncate until the last offset of the last column of the
+                // last row inserted
+
+                // Windows has locked the file with the mmap handle, so we need to drop it
+                drop(reader);
+
+                self.offsets_file().get_mut().set_len(expected_offsets_file_size)?;
+                reader = self.jar.open_data_reader()?;
+            }
+            Ordering::Greater => {
+                // Happened during a pruning job
+                // `num rows = (file size - 1 - size of one offset) / num columns`
+                self.jar.rows = ((actual_offsets_file_size.
+                    saturating_sub(1). // first byte is the size of one offset
+                    saturating_sub(OFFSET_SIZE_BYTES as u64) / // expected size of the data file
+                    (self.jar.columns as u64)) /
+                    OFFSET_SIZE_BYTES as u64) as usize;
+
+                // Freeze row count changed
+                self.jar.freeze_config()?;
+            }
+            Ordering::Equal => {}
+        }
+
+        // last offset should match the data_file_len
+        let last_offset = reader.reverse_offset(0)?;
+        let data_file_len = self.data_file().get_ref().metadata()?.len();
+
+        if mode.should_err() && last_offset.cmp(&data_file_len) != Ordering::Equal {
+            return Err(NippyJarError::InconsistentState)
+        }
+
+        // Offset list wasn't properly committed
+        match last_offset.cmp(&data_file_len) {
+            Ordering::Less => {
+                // Windows has locked the file with the mmap handle, so we need to drop it
+                drop(reader);
+
+                // Happened during an appending job, so we need to truncate the data, since there's
+                // no way to recover it.
+                self.data_file().get_mut().set_len(last_offset)?;
+            }
+            Ordering::Greater => {
+                // Happened during a pruning job, so we need to reverse iterate offsets until we
+                // find the matching one.
+                for index in 0..reader.offsets_count()? {
+                    let offset = reader.reverse_offset(index + 1)?;
+                    // It would only be equal if the previous row was fully pruned.
+                    if offset <= data_file_len {
+                        let new_len = self
+                            .offsets_file()
+                            .get_ref()
+                            .metadata()?
+ .len() + .saturating_sub(OFFSET_SIZE_BYTES as u64 * (index as u64 + 1)); + + // Windows has locked the file with the mmap handle, so we need to drop it + drop(reader); + + self.offsets_file().get_mut().set_len(new_len)?; + + // Since we decrease the offset list, we need to check the consistency of + // `self.jar.rows` again + self.handle_consistency(ConsistencyFailStrategy::Heal)?; + break + } + } + } + Ordering::Equal => {} + } + + self.offsets_file().seek(SeekFrom::End(0))?; + self.data_file().seek(SeekFrom::End(0))?; + + Ok(()) + } + + /// Loads data and offsets files. + fn load_files(&mut self, mode: ConsistencyFailStrategy) -> Result<(), NippyJarError> { + let load_file = |path: &Path| -> Result, NippyJarError> { + let path = path + .exists() + .then_some(path) + .ok_or_else(|| NippyJarError::MissingFile(path.to_path_buf()))?; + Ok(BufWriter::new(OpenOptions::new().read(true).write(mode.should_heal()).open(path)?)) + }; + self.data_file = Some(load_file(self.jar.data_path())?); + self.offsets_file = Some(load_file(&self.jar.offsets_path())?); + Ok(()) + } + + /// Returns a mutable reference to offsets file. + /// + /// **Panics** if it does not exist. + fn offsets_file(&mut self) -> &mut BufWriter { + self.offsets_file.as_mut().expect("should exist") + } + + /// Returns a mutable reference to data file. + /// + /// **Panics** if it does not exist. + fn data_file(&mut self) -> &mut BufWriter { + self.data_file.as_mut().expect("should exist") + } +} + +/// Strategy on encountering an inconsistent state on [`NippyJarChecker`]. +#[derive(Debug, Copy, Clone)] +enum ConsistencyFailStrategy { + /// Writer should heal. + Heal, + /// Writer should throw an error. + ThrowError, +} + +impl ConsistencyFailStrategy { + /// Whether writer should heal. + const fn should_heal(&self) -> bool { + matches!(self, Self::Heal) + } + + /// Whether writer should throw an error. + const fn should_err(&self) -> bool { + matches!(self, Self::ThrowError) + } +} diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index d42b0d364b..7af55fd436 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -1,10 +1,8 @@ use crate::{ compression::{Compression, Compressors, Zstd}, - DataReader, InclusionFilter, NippyJar, NippyJarError, NippyJarHeader, PerfectHashingFunction, - RefRow, + DataReader, NippyJar, NippyJarError, NippyJarHeader, RefRow, }; use std::{ops::Range, sync::Arc}; -use sucds::int_vectors::Access; use zstd::bulk::Decompressor; /// Simple cursor implementation to retrieve data from [`NippyJar`]. @@ -67,35 +65,6 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { self.row = 0; } - /// Returns a row, searching it by a key. - /// - /// **May return false positives.** - /// - /// Example usage would be querying a transactions file with a transaction hash which is **NOT** - /// stored in file. - pub fn row_by_key(&mut self, key: &[u8]) -> Result>, NippyJarError> { - if let (Some(filter), Some(phf)) = (&self.jar.filter, &self.jar.phf) { - // TODO: is it worth to parallelize both? - - // May have false positives - if filter.contains(key)? { - // May have false positives - if let Some(row_index) = phf.get_index(key)? { - self.row = self - .jar - .offsets_index - .access(row_index as usize) - .expect("built from same set") as u64; - return self.next_row() - } - } - } else { - return Err(NippyJarError::UnsupportedFilterQuery) - } - - Ok(None) - } - /// Returns a row by its number. 
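
The size checks above all derive from one layout invariant: the offsets file stores a single leading byte for the offset width, one 8-byte offset per row/column pair, and a trailing offset equal to the data file length. A self-contained sketch of that arithmetic and of the row-count recovery used after an interrupted prune (helper names are illustrative, not the crate's API):

```rust
const OFFSET_SIZE_BYTES: u64 = 8;

/// 1 leading byte (the per-offset width) + one offset per row/column pair
/// + one trailing offset pointing at the end of the data file.
fn expected_offsets_file_size(rows: u64, columns: u64) -> u64 {
    1 + OFFSET_SIZE_BYTES * rows * columns + OFFSET_SIZE_BYTES
}

/// Inverse used when the offsets file is *larger* than expected
/// (an interrupted prune): recover the committed row count.
fn rows_from_offsets_file_len(len: u64, columns: u64) -> u64 {
    (len.saturating_sub(1).saturating_sub(OFFSET_SIZE_BYTES) / columns) / OFFSET_SIZE_BYTES
}

fn main() {
    let (rows, columns) = (100, 2);
    let len = expected_offsets_file_size(rows, columns);
    assert_eq!(len, 1 + 8 * 200 + 8);
    assert_eq!(rows_from_offsets_file_len(len, columns), rows);
}
```
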
pub fn row_by_number(&mut self, row: usize) -> Result>, NippyJarError> { self.row = row as u64; @@ -130,40 +99,6 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { )) } - /// Returns a row, searching it by a key using a - /// `mask` to only read certain columns from the row. - /// - /// **May return false positives.** - /// - /// Example usage would be querying a transactions file with a transaction hash which is **NOT** - /// stored in file. - pub fn row_by_key_with_cols( - &mut self, - key: &[u8], - mask: usize, - ) -> Result>, NippyJarError> { - if let (Some(filter), Some(phf)) = (&self.jar.filter, &self.jar.phf) { - // TODO: is it worth to parallelize both? - - // May have false positives - if filter.contains(key)? { - // May have false positives - if let Some(row_index) = phf.get_index(key)? { - self.row = self - .jar - .offsets_index - .access(row_index as usize) - .expect("built from same set") as u64; - return self.next_row_with_cols(mask) - } - } - } else { - return Err(NippyJarError::UnsupportedFilterQuery) - } - - Ok(None) - } - /// Returns a row by its number by using a `mask` to only read certain columns from the row. pub fn row_by_number_with_cols( &mut self, diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 225d4fba30..ffeb5f3939 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -1,3 +1,4 @@ +use std::path::PathBuf; use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. @@ -23,20 +24,6 @@ pub enum NippyJarError { ColumnLenMismatch(usize, usize), #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), - #[error(transparent)] - EthFilterError(#[from] cuckoofilter::CuckooError), - #[error("nippy jar initialized without filter")] - FilterMissing, - #[error("filter has reached max capacity")] - FilterMaxCapacity, - #[error("cuckoo was not properly initialized after loaded")] - FilterCuckooNotLoaded, - #[error("perfect hashing function doesn't have any keys added")] - PHFMissingKeys, - #[error("nippy jar initialized without perfect hashing function")] - PHFMissing, - #[error("nippy jar was built without an index")] - UnsupportedFilterQuery, #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. @@ -64,4 +51,6 @@ pub enum NippyJarError { FrozenJar, #[error("File is in an inconsistent state.")] InconsistentState, + #[error("Missing file: {0}.")] + MissingFile(PathBuf), } diff --git a/crates/storage/nippy-jar/src/filter/cuckoo.rs b/crates/storage/nippy-jar/src/filter/cuckoo.rs deleted file mode 100644 index b4e05fbb74..0000000000 --- a/crates/storage/nippy-jar/src/filter/cuckoo.rs +++ /dev/null @@ -1,88 +0,0 @@ -use super::InclusionFilter; -use crate::NippyJarError; -use cuckoofilter::{CuckooFilter, ExportedCuckooFilter}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::collections::hash_map::DefaultHasher; - -/// [CuckooFilter](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf). It builds and provides an approximated set-membership filter to answer queries such as "Does this element belong to this set?". Has a theoretical 3% false positive rate. -pub struct Cuckoo { - /// Remaining number of elements that can be added. 
- /// - /// This is necessary because the inner implementation will fail on adding an element past capacity, **but it will still add it and remove other**: [source](https://github.com/axiomhq/rust-cuckoofilter/tree/624da891bed1dd5d002c8fa92ce0dcd301975561#notes--todos) - remaining: usize, - - /// `CuckooFilter`. - filter: CuckooFilter, // TODO does it need an actual hasher? -} - -impl Cuckoo { - pub fn new(max_capacity: usize) -> Self { - // CuckooFilter might return `NotEnoughSpace` even if they are remaining elements, if it's - // close to capacity. Therefore, we increase it. - let max_capacity = max_capacity + 100 + max_capacity / 3; - - Self { remaining: max_capacity, filter: CuckooFilter::with_capacity(max_capacity) } - } -} - -impl InclusionFilter for Cuckoo { - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { - if self.remaining == 0 { - return Err(NippyJarError::FilterMaxCapacity) - } - - self.remaining -= 1; - - Ok(self.filter.add(element)?) - } - - fn contains(&self, element: &[u8]) -> Result { - Ok(self.filter.contains(element)) - } - - fn size(&self) -> usize { - self.filter.memory_usage() - } -} - -impl std::fmt::Debug for Cuckoo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Cuckoo") - .field("remaining", &self.remaining) - .field("filter_size", &self.filter.memory_usage()) - .finish_non_exhaustive() - } -} - -#[cfg(test)] -impl PartialEq for Cuckoo { - fn eq(&self, _other: &Self) -> bool { - self.remaining == _other.remaining && { - let f1 = self.filter.export(); - let f2 = _other.filter.export(); - f1.length == f2.length && f1.values == f2.values - } - } -} - -impl<'de> Deserialize<'de> for Cuckoo { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let (remaining, exported): (usize, ExportedCuckooFilter) = - Deserialize::deserialize(deserializer)?; - - Ok(Self { remaining, filter: exported.into() }) - } -} - -impl Serialize for Cuckoo { - /// Potentially expensive, but should be used only when creating the file. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - (self.remaining, self.filter.export()).serialize(serializer) - } -} diff --git a/crates/storage/nippy-jar/src/filter/mod.rs b/crates/storage/nippy-jar/src/filter/mod.rs deleted file mode 100644 index 3ddae0148c..0000000000 --- a/crates/storage/nippy-jar/src/filter/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::NippyJarError; -use serde::{Deserialize, Serialize}; - -mod cuckoo; -pub use cuckoo::Cuckoo; - -/// Membership filter set trait. -pub trait InclusionFilter { - /// Add element to the inclusion list. - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError>; - - /// Checks if the element belongs to the inclusion list. **There might be false positives.** - fn contains(&self, element: &[u8]) -> Result; - - fn size(&self) -> usize; -} - -/// Enum with different [`InclusionFilter`] types. -#[derive(Debug, Serialize, Deserialize)] -#[cfg_attr(test, derive(PartialEq))] -pub enum InclusionFilters { - Cuckoo(Cuckoo), - // Avoids irrefutable let errors. Remove this after adding another one. 
- Unused, -} - -impl InclusionFilter for InclusionFilters { - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { - match self { - Self::Cuckoo(c) => c.add(element), - Self::Unused => todo!(), - } - } - - fn contains(&self, element: &[u8]) -> Result { - match self { - Self::Cuckoo(c) => c.contains(element), - Self::Unused => todo!(), - } - } - - fn size(&self) -> usize { - match self { - Self::Cuckoo(c) => c.size(), - Self::Unused => 0, - } - } -} diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 056f456eb2..a720192d6a 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -21,20 +21,27 @@ use std::{ ops::Range, path::{Path, PathBuf}, }; -use sucds::{int_vectors::PrefixSummedEliasFano, Serializable}; -use tracing::*; -pub mod filter; -use filter::{Cuckoo, InclusionFilter, InclusionFilters}; +// Windows specific extension for std::fs +#[cfg(windows)] +use std::os::windows::prelude::OpenOptionsExt; + +use tracing::*; pub mod compression; #[cfg(test)] use compression::Compression; use compression::Compressors; -pub mod phf; -pub use phf::PHFKey; -use phf::{Fmph, Functions, GoFmph, PerfectHashingFunction}; +/// empty enum for backwards compatibility +#[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub enum Functions {} + +/// empty enum for backwards compatibility +#[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub enum InclusionFilters {} mod error; pub use error::NippyJarError; @@ -43,13 +50,16 @@ mod cursor; pub use cursor::NippyJarCursor; mod writer; -pub use writer::{ConsistencyFailStrategy, NippyJarWriter}; +pub use writer::NippyJarWriter; + +mod consistency; +pub use consistency::NippyJarChecker; const NIPPY_JAR_VERSION: usize = 1; const INDEX_FILE_EXTENSION: &str = "idx"; const OFFSETS_FILE_EXTENSION: &str = "off"; -const CONFIG_FILE_EXTENSION: &str = "conf"; +pub const CONFIG_FILE_EXTENSION: &str = "conf"; /// A [`RefRow`] is a list of column value slices pointing to either an internal buffer or a /// memory-mapped file. @@ -74,26 +84,6 @@ impl NippyJarHeader for T where /// /// Data is organized into a columnar format, enabling column-based compression. Data retrieval /// entails consulting an offset list and fetching the data from file via `mmap`. -/// -/// PHF & Filters: -/// For data membership verification, the `filter` field can be configured with algorithms like -/// Bloom or Cuckoo filters. While these filters enable rapid membership checks, it's important to -/// note that **they may yield false positives but not false negatives**. Therefore, they serve as -/// preliminary checks (eg. in `by_hash` queries) and should be followed by data verification on -/// retrieval. -/// -/// The `phf` (Perfect Hashing Function) and `offsets_index` fields facilitate the data retrieval -/// process in for example `by_hash` queries. Specifically, the PHF converts a query, such as a -/// block hash, into a unique integer. This integer is then used as an index in `offsets_index`, -/// which maps to the actual data location in the `offsets` list. Similar to the `filter`, the PHF -/// may also produce false positives but not false negatives, necessitating subsequent data -/// verification. -/// -/// Note: that the key (eg. `BlockHash`) passed to a filter and phf does not need to actually be -/// stored. 
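
The `Functions` and `InclusionFilters` stubs introduced above are uninhabited enums: the old `Option`-typed fields keep compiling but can only ever hold `None`. A minimal sketch of why that is safe for the serialized config, assuming `serde` and `bincode` as the crate already uses (the `Config` struct here is illustrative, not `NippyJar` itself):

```rust
use serde::{Deserialize, Serialize};

/// Uninhabited stand-in: no value of this type can ever be constructed.
#[derive(Debug, Serialize, Deserialize)]
pub enum Functions {}

#[derive(Serialize, Deserialize)]
struct Config {
    rows: usize,
    // Skipped during (de)serialization, exactly like the patched `NippyJar`
    // fields; it can never be `Some` because `Functions` is uninhabited.
    #[serde(skip)]
    phf: Option<Functions>,
}

fn main() {
    let cfg = Config { rows: 3, phf: None };
    let bytes = bincode::serialize(&cfg).unwrap();
    // Only `rows` is written: the skipped field costs zero bytes on disk.
    assert_eq!(bytes.len(), 8);
    let back: Config = bincode::deserialize(&bytes).unwrap();
    assert!(back.phf.is_none());
}
```
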
-/// -/// Ultimately, the `freeze` function yields two files: a data file containing both the data and its -/// configuration, and an index file that houses the offsets and `offsets_index`. #[derive(Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub struct NippyJar { @@ -109,14 +99,11 @@ pub struct NippyJar { /// Optional compression algorithm applied to the data. compressor: Option, #[serde(skip)] - /// Optional filter function for data membership checks. + /// Optional field for backwards compatibility filter: Option, #[serde(skip)] - /// Optional Perfect Hashing Function (PHF) for unique offset mapping. + /// Optional field for backwards compatibility phf: Option, - /// Index mapping PHF output to value offsets in `offsets`. - #[serde(skip)] - offsets_index: PrefixSummedEliasFano, /// Maximum uncompressed row size of the set. This will enable decompression without any /// resizing of the output buffer. max_row_size: usize, @@ -135,8 +122,6 @@ impl std::fmt::Debug for NippyJar { .field("compressor", &self.compressor) .field("filter", &self.filter) .field("phf", &self.phf) - .field("offsets_index (len)", &self.offsets_index.len()) - .field("offsets_index (size in bytes)", &self.offsets_index.size_in_bytes()) .field("path", &self.path) .field("max_row_size", &self.max_row_size) .finish_non_exhaustive() @@ -153,11 +138,6 @@ impl NippyJar<()> { pub fn load_without_header(path: &Path) -> Result { Self::load(path) } - - /// Whether this [`NippyJar`] uses a [`InclusionFilters`] and [`Functions`]. - pub const fn uses_filters(&self) -> bool { - self.filter.is_some() && self.phf.is_some() - } } impl NippyJar { @@ -172,7 +152,6 @@ impl NippyJar { compressor: None, filter: None, phf: None, - offsets_index: PrefixSummedEliasFano::default(), path: path.to_path_buf(), } } @@ -190,24 +169,6 @@ impl NippyJar { self } - /// Adds [`filter::Cuckoo`] filter. - pub fn with_cuckoo_filter(mut self, max_capacity: usize) -> Self { - self.filter = Some(InclusionFilters::Cuckoo(Cuckoo::new(max_capacity))); - self - } - - /// Adds [`phf::Fmph`] perfect hashing function. - pub fn with_fmph(mut self) -> Self { - self.phf = Some(Functions::Fmph(Fmph::new())); - self - } - - /// Adds [`phf::GoFmph`] perfect hashing function. - pub fn with_gofmph(mut self) -> Self { - self.phf = Some(Functions::GoFmph(GoFmph::new())); - self - } - /// Gets a reference to the user header. pub const fn user_header(&self) -> &H { &self.user_header @@ -223,16 +184,6 @@ impl NippyJar { self.rows } - /// Returns the size of inclusion filter - pub fn filter_size(&self) -> usize { - self.size() - } - - /// Returns the size of offsets index - pub fn offsets_index_size(&self) -> usize { - self.offsets_index.size_in_bytes() - } - /// Gets a reference to the compressor. pub const fn compressor(&self) -> Option<&Compressors> { self.compressor.as_ref() @@ -243,8 +194,7 @@ impl NippyJar { self.compressor.as_mut() } - /// Loads the file configuration and returns [`Self`] without deserializing filters related - /// structures or the offset list. + /// Loads the file configuration and returns [`Self`]. /// /// **The user must ensure the header type matches the one used during the jar's creation.** pub fn load(path: &Path) -> Result { @@ -258,16 +208,6 @@ impl NippyJar { Ok(obj) } - /// Loads filters into memory. - pub fn load_filters(&mut self) -> Result<(), NippyJarError> { - // Read the offsets lists located at the index file. 
- let mut offsets_file = File::open(self.index_path())?; - self.offsets_index = PrefixSummedEliasFano::deserialize_from(&mut offsets_file)?; - self.phf = bincode::deserialize_from(&mut offsets_file)?; - self.filter = bincode::deserialize_from(&mut offsets_file)?; - Ok(()) - } - /// Returns the path for the data file pub fn data_path(&self) -> &Path { self.path.as_ref() @@ -326,36 +266,22 @@ impl NippyJar { // fsync() dir if let Some(parent) = tmp_path.parent() { + //custom_flags() is only available on Windows + #[cfg(windows)] + OpenOptions::new() + .read(true) + .write(true) + .custom_flags(0x02000000) // FILE_FLAG_BACKUP_SEMANTICS + .open(parent)? + .sync_all()?; + + #[cfg(not(windows))] OpenOptions::new().read(true).open(parent)?.sync_all()?; } Ok(()) } } -impl InclusionFilter for NippyJar { - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { - self.filter.as_mut().ok_or(NippyJarError::FilterMissing)?.add(element) - } - - fn contains(&self, element: &[u8]) -> Result { - self.filter.as_ref().ok_or(NippyJarError::FilterMissing)?.contains(element) - } - - fn size(&self) -> usize { - self.filter.as_ref().map(|f| f.size()).unwrap_or(0) - } -} - -impl PerfectHashingFunction for NippyJar { - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { - self.phf.as_mut().ok_or(NippyJarError::PHFMissing)?.set_keys(keys) - } - - fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { - self.phf.as_ref().ok_or(NippyJarError::PHFMissing)?.get_index(key) - } -} - #[cfg(test)] impl NippyJar { /// If required, prepares any compression algorithm to an early pass of the data. @@ -371,55 +297,6 @@ impl NippyJar { Ok(()) } - /// Prepares beforehand the offsets index for querying rows based on `values` (eg. transaction - /// hash). Expects `values` to be sorted in the same way as the data that is going to be - /// later on inserted. - /// - /// Currently collecting all items before acting on them. - pub fn prepare_index( - &mut self, - values: impl IntoIterator>, - row_count: usize, - ) -> Result<(), NippyJarError> { - debug!(target: "nippy-jar", ?row_count, "Preparing index."); - - let values = values.into_iter().collect::, _>>()?; - - debug_assert!( - row_count == values.len(), - "Row count ({row_count}) differs from value list count ({}).", - values.len() - ); - - let mut offsets_index = vec![0; row_count]; - - // Builds perfect hashing function from the values - if let Some(phf) = self.phf.as_mut() { - debug!(target: "nippy-jar", ?row_count, values_count = ?values.len(), "Setting keys for perfect hashing function."); - phf.set_keys(&values)?; - } - - if self.filter.is_some() || self.phf.is_some() { - debug!(target: "nippy-jar", ?row_count, "Creating filter and offsets_index."); - - for (row_num, v) in values.into_iter().enumerate() { - if let Some(filter) = self.filter.as_mut() { - filter.add(v.as_ref())?; - } - - if let Some(phf) = self.phf.as_mut() { - // Points to the first column value offset of the row. - let index = phf.get_index(v.as_ref())?.expect("initialized") as usize; - let _ = std::mem::replace(&mut offsets_index[index], row_num as u64); - } - } - } - - debug!(target: "nippy-jar", ?row_count, "Encoding offsets index list."); - self.offsets_index = PrefixSummedEliasFano::from_slice(&offsets_index)?; - Ok(()) - } - /// Writes all data and configuration to a file and the offset index to another. 
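
The conditional compilation above works around a Windows rule: a directory handle can only be obtained from `CreateFile` when `FILE_FLAG_BACKUP_SEMANTICS` is passed, and without such a handle the parent directory entry cannot be fsynced after the atomic rename. A standalone sketch of the same pattern (the helper is hypothetical, mirroring the patched `freeze_config`):

```rust
use std::{fs::OpenOptions, io, path::Path};

/// fsyncs a directory on both Unix and Windows.
fn fsync_dir(dir: &Path) -> io::Result<()> {
    #[cfg(windows)]
    {
        use std::os::windows::fs::OpenOptionsExt;
        OpenOptions::new()
            .read(true)
            .write(true)
            .custom_flags(0x0200_0000) // FILE_FLAG_BACKUP_SEMANTICS
            .open(dir)?
            .sync_all()
    }
    #[cfg(not(windows))]
    {
        OpenOptions::new().read(true).open(dir)?.sync_all()
    }
}

fn main() -> io::Result<()> {
    fsync_dir(Path::new("."))
}
```
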
pub fn freeze( self, @@ -430,11 +307,8 @@ impl NippyJar { debug!(target: "nippy-jar", path=?self.data_path(), "Opening data file."); - // Write phf, filter and offset index to file - self.freeze_filters()?; - // Creates the writer, data and offsets file - let mut writer = NippyJarWriter::new(self, ConsistencyFailStrategy::Heal)?; + let mut writer = NippyJarWriter::new(self)?; // Append rows to file while holding offsets in memory writer.append_rows(columns, total_rows)?; @@ -447,18 +321,6 @@ impl NippyJar { Ok(writer.into_jar()) } - /// Freezes [`PerfectHashingFunction`], [`InclusionFilter`] and the offset index to file. - fn freeze_filters(&self) -> Result<(), NippyJarError> { - debug!(target: "nippy-jar", path=?self.index_path(), "Writing offsets and offsets index to file."); - - let mut file = File::create(self.index_path())?; - self.offsets_index.serialize_into(&mut file)?; - bincode::serialize_into(&mut file, &self.phf)?; - bincode::serialize_into(&mut file, &self.filter)?; - - Ok(()) - } - /// Safety checks before creating and returning a [`File`] handle to write data to. fn check_before_freeze( &self, @@ -474,11 +336,6 @@ impl NippyJar { } } - // Check `prepare_index` was called. - if let Some(phf) = &self.phf { - let _ = phf.get_index(&[])?; - } - Ok(()) } } @@ -588,7 +445,7 @@ mod tests { use super::*; use compression::Compression; use rand::{rngs::SmallRng, seq::SliceRandom, RngCore, SeedableRng}; - use std::{collections::HashSet, fs::OpenOptions}; + use std::{fs::OpenOptions, io::Read}; type ColumnResults = Vec>; type ColumnValues = Vec>; @@ -617,102 +474,30 @@ mod tests { } #[test] - fn test_phf() { - let (col1, col2) = test_data(None); - let num_columns = 2; - let num_rows = col1.len() as u64; - let file_path = tempfile::NamedTempFile::new().unwrap(); - - let create_nippy = || -> NippyJar<()> { - let mut nippy = NippyJar::new_without_header(num_columns, file_path.path()); - assert!(matches!( - NippyJar::set_keys(&mut nippy, &col1), - Err(NippyJarError::PHFMissing) - )); - nippy - }; - - let check_phf = |mut nippy: NippyJar<_>| { - assert!(matches!( - NippyJar::get_index(&nippy, &col1[0]), - Err(NippyJarError::PHFMissingKeys) - )); - assert!(NippyJar::set_keys(&mut nippy, &col1).is_ok()); - - let collect_indexes = |nippy: &NippyJar<_>| -> Vec { - col1.iter() - .map(|value| NippyJar::get_index(nippy, value.as_slice()).unwrap().unwrap()) - .collect() - }; - - // Ensure all indexes are unique - let indexes = collect_indexes(&nippy); - assert_eq!(indexes.iter().collect::>().len(), indexes.len()); - - // Ensure reproducibility - assert!(NippyJar::set_keys(&mut nippy, &col1).is_ok()); - assert_eq!(indexes, collect_indexes(&nippy)); - - // Ensure that loaded phf provides the same function outputs - nippy.prepare_index(clone_with_result(&col1), col1.len()).unwrap(); - nippy - .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) - .unwrap(); - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); - assert_eq!(indexes, collect_indexes(&loaded_nippy)); - }; - - // fmph bytes size for 100 values of 32 bytes: 54 - check_phf(create_nippy().with_fmph()); - - // fmph bytes size for 100 values of 32 bytes: 46 - check_phf(create_nippy().with_gofmph()); - } - - #[test] - fn test_filter() { - let (col1, col2) = test_data(Some(1)); - let num_columns = 2; - let num_rows = col1.len() as u64; - let file_path = tempfile::NamedTempFile::new().unwrap(); + fn test_config_serialization() { + let file = 
tempfile::NamedTempFile::new().unwrap(); + let jar = NippyJar::new_without_header(23, file.path()).with_lz4(); + jar.freeze_config().unwrap(); - let mut nippy = NippyJar::new_without_header(num_columns, file_path.path()); + let mut config_file = OpenOptions::new().read(true).open(jar.config_path()).unwrap(); + let config_file_len = config_file.metadata().unwrap().len(); + assert_eq!(config_file_len, 37); - assert!(matches!( - InclusionFilter::add(&mut nippy, &col1[0]), - Err(NippyJarError::FilterMissing) - )); - - nippy = nippy.with_cuckoo_filter(4); - - // Add col1[0] - assert!(!InclusionFilter::contains(&nippy, &col1[0]).unwrap()); - assert!(InclusionFilter::add(&mut nippy, &col1[0]).is_ok()); - assert!(InclusionFilter::contains(&nippy, &col1[0]).unwrap()); - - // Add col1[1] - assert!(!InclusionFilter::contains(&nippy, &col1[1]).unwrap()); - assert!(InclusionFilter::add(&mut nippy, &col1[1]).is_ok()); - assert!(InclusionFilter::contains(&nippy, &col1[1]).unwrap()); - - // // Add more columns until max_capacity - assert!(InclusionFilter::add(&mut nippy, &col1[2]).is_ok()); - assert!(InclusionFilter::add(&mut nippy, &col1[3]).is_ok()); - - let nippy = nippy - .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) - .unwrap(); - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); + let mut buf = Vec::with_capacity(config_file_len as usize); + config_file.read_to_end(&mut buf).unwrap(); - assert_eq!(nippy, loaded_nippy); + assert_eq!( + vec![ + 1, 0, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + buf + ); - assert!(InclusionFilter::contains(&loaded_nippy, &col1[0]).unwrap()); - assert!(InclusionFilter::contains(&loaded_nippy, &col1[1]).unwrap()); - assert!(InclusionFilter::contains(&loaded_nippy, &col1[2]).unwrap()); - assert!(InclusionFilter::contains(&loaded_nippy, &col1[3]).unwrap()); - assert!(!InclusionFilter::contains(&loaded_nippy, &col1[4]).unwrap()); + let mut read_jar = bincode::deserialize_from::<_, NippyJar>(&buf[..]).unwrap(); + // Path is not ser/de + read_jar.path = file.path().to_path_buf(); + assert_eq!(jar, read_jar); } #[test] @@ -763,13 +548,11 @@ mod tests { .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) .unwrap(); - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); + let loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); assert_eq!(nippy.version, loaded_nippy.version); assert_eq!(nippy.columns, loaded_nippy.columns); assert_eq!(nippy.filter, loaded_nippy.filter); assert_eq!(nippy.phf, loaded_nippy.phf); - assert_eq!(nippy.offsets_index, loaded_nippy.offsets_index); assert_eq!(nippy.max_row_size, loaded_nippy.max_row_size); assert_eq!(nippy.path, loaded_nippy.path); @@ -808,8 +591,7 @@ mod tests { .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) .unwrap(); - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); + let loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); assert_eq!(nippy, loaded_nippy); if let Some(Compressors::Lz4(_)) = loaded_nippy.compressor() { @@ -847,8 +629,7 @@ mod tests { .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) .unwrap(); - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - 
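
The 37-byte assertion above is explainable under bincode's default fixed-width encoding (`usize` as 8 little-endian bytes, 1 byte for an `Option` tag, 4 bytes for an enum variant index). The decomposition below is an editorial reading of the asserted byte string, not something stated by the patch:

```rust
fn main() {
    let version = 8; // usize 1
    let user_header = 0; // `()` serializes to nothing
    let columns = 8; // usize 23
    let rows = 8; // usize 0
    let compressor_tag = 1; // Option tag: Some
    let variant_tag = 4; // u32 tag selecting the Lz4 variant
    let max_row_size = 8; // usize 0
    // `filter`, `phf` and `path` are `#[serde(skip)]`ed, so they cost nothing.
    assert_eq!(
        version + user_header + columns + rows + compressor_tag + variant_tag + max_row_size,
        37
    );
}
```
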
loaded_nippy.load_filters().unwrap(); + let loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); assert_eq!(nippy, loaded_nippy); if let Some(Compressors::Zstd(zstd)) = loaded_nippy.compressor() { @@ -870,7 +651,7 @@ mod tests { } } - /// Tests `NippyJar` with everything enabled: compression, filter, offset list and offset index. + /// Tests `NippyJar` with everything enabled. #[test] fn test_full_nippy_jar() { let (col1, col2) = test_data(None); @@ -890,12 +671,9 @@ mod tests { { let mut nippy = NippyJar::new(num_columns, file_path.path(), BlockJarHeader { block_start }) - .with_zstd(true, 5000) - .with_cuckoo_filter(col1.len()) - .with_fmph(); + .with_zstd(true, 5000); nippy.prepare_compression(data.clone()).unwrap(); - nippy.prepare_index(clone_with_result(&col1), col1.len()).unwrap(); nippy .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) .unwrap(); @@ -903,12 +681,9 @@ mod tests { // Read file { - let mut loaded_nippy = NippyJar::::load(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); + let loaded_nippy = NippyJar::::load(file_path.path()).unwrap(); assert!(loaded_nippy.compressor().is_some()); - assert!(loaded_nippy.filter.is_some()); - assert!(loaded_nippy.phf.is_some()); assert_eq!(loaded_nippy.user_header().block_start, block_start); if let Some(Compressors::Zstd(_zstd)) = loaded_nippy.compressor() { @@ -929,22 +704,9 @@ mod tests { data.shuffle(&mut rand::thread_rng()); for (row_num, (v0, v1)) in data { - // Simulates `by_hash` queries by iterating col1 values, which were used to - // create the inner index. - { - let row_by_value = cursor - .row_by_key(v0) - .unwrap() - .unwrap() - .iter() - .map(|a| a.to_vec()) - .collect::>(); - assert_eq!((&row_by_value[0], &row_by_value[1]), (v0, v1)); - - // Simulates `by_number` queries - let row_by_num = cursor.row_by_number(row_num).unwrap().unwrap(); - assert_eq!(row_by_value, row_by_num); - } + // Simulates `by_number` queries + let row_by_num = cursor.row_by_number(row_num).unwrap().unwrap(); + assert_eq!((&row_by_num[0].to_vec(), &row_by_num[1].to_vec()), (v0, v1)); } } } @@ -960,13 +722,9 @@ mod tests { // Create file { - let mut nippy = NippyJar::new_without_header(num_columns, file_path.path()) - .with_zstd(true, 5000) - .with_cuckoo_filter(col1.len()) - .with_fmph(); - + let mut nippy = + NippyJar::new_without_header(num_columns, file_path.path()).with_zstd(true, 5000); nippy.prepare_compression(data).unwrap(); - nippy.prepare_index(clone_with_result(&col1), col1.len()).unwrap(); nippy .freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows) .unwrap(); @@ -974,8 +732,7 @@ mod tests { // Read file { - let mut loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); - loaded_nippy.load_filters().unwrap(); + let loaded_nippy = NippyJar::load_without_header(file_path.path()).unwrap(); if let Some(Compressors::Zstd(_zstd)) = loaded_nippy.compressor() { let mut cursor = NippyJarCursor::new(&loaded_nippy).unwrap(); @@ -989,84 +746,41 @@ mod tests { // Read both columns for (row_num, (v0, v1)) in &data { - // Simulates `by_hash` queries by iterating col1 values, which were used to - // create the inner index. 
- let row_by_value = cursor - .row_by_key_with_cols(v0, BLOCKS_FULL_MASK) - .unwrap() - .unwrap() - .iter() - .map(|a| a.to_vec()) - .collect::>(); - assert_eq!((&row_by_value[0], &row_by_value[1]), (*v0, *v1)); - // Simulates `by_number` queries let row_by_num = cursor .row_by_number_with_cols(*row_num, BLOCKS_FULL_MASK) .unwrap() .unwrap(); - assert_eq!(row_by_value, row_by_num); + assert_eq!((&row_by_num[0].to_vec(), &row_by_num[1].to_vec()), (*v0, *v1)); } // Read first column only: `Block` const BLOCKS_BLOCK_MASK: usize = 0b01; for (row_num, (v0, _)) in &data { - // Simulates `by_hash` queries by iterating col1 values, which were used to - // create the inner index. - let row_by_value = cursor - .row_by_key_with_cols(v0, BLOCKS_BLOCK_MASK) - .unwrap() - .unwrap() - .iter() - .map(|a| a.to_vec()) - .collect::>(); - assert_eq!(row_by_value.len(), 1); - assert_eq!(&row_by_value[0], *v0); - // Simulates `by_number` queries let row_by_num = cursor .row_by_number_with_cols(*row_num, BLOCKS_BLOCK_MASK) .unwrap() .unwrap(); assert_eq!(row_by_num.len(), 1); - assert_eq!(row_by_value, row_by_num); + assert_eq!(&row_by_num[0].to_vec(), *v0); } // Read second column only: `Block` const BLOCKS_WITHDRAWAL_MASK: usize = 0b10; - for (row_num, (v0, v1)) in &data { - // Simulates `by_hash` queries by iterating col1 values, which were used to - // create the inner index. - let row_by_value = cursor - .row_by_key_with_cols(v0, BLOCKS_WITHDRAWAL_MASK) - .unwrap() - .unwrap() - .iter() - .map(|a| a.to_vec()) - .collect::>(); - assert_eq!(row_by_value.len(), 1); - assert_eq!(&row_by_value[0], *v1); - + for (row_num, (_, v1)) in &data { // Simulates `by_number` queries let row_by_num = cursor .row_by_number_with_cols(*row_num, BLOCKS_WITHDRAWAL_MASK) .unwrap() .unwrap(); assert_eq!(row_by_num.len(), 1); - assert_eq!(row_by_value, row_by_num); + assert_eq!(&row_by_num[0].to_vec(), *v1); } // Read nothing const BLOCKS_EMPTY_MASK: usize = 0b00; - for (row_num, (v0, _)) in &data { - // Simulates `by_hash` queries by iterating col1 values, which were used to - // create the inner index. - assert!(cursor - .row_by_key_with_cols(v0, BLOCKS_EMPTY_MASK) - .unwrap() - .unwrap() - .is_empty()); - + for (row_num, _) in &data { // Simulates `by_number` queries assert!(cursor .row_by_number_with_cols(*row_num, BLOCKS_EMPTY_MASK) @@ -1140,7 +854,7 @@ mod tests { assert!(initial_offset_size > 0); // Appends a third row - let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); writer.append_column(Some(Ok(&col1[2]))).unwrap(); writer.append_column(Some(Ok(&col2[2]))).unwrap(); @@ -1171,7 +885,7 @@ mod tests { // Writer will execute a consistency check and verify first that the offset list on disk // doesn't match the nippy.rows, and prune it. Then, it will prune the data file // accordingly as well. - let writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let writer = NippyJarWriter::new(nippy).unwrap(); assert_eq!(initial_rows, writer.rows()); assert_eq!( initial_offset_size, @@ -1197,7 +911,7 @@ mod tests { // Appends a third row, so we have an offset list in memory, which is not flushed to disk, // while the data has been. 
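
The masks exercised in these tests are plain bitmaps over column positions: bit `i` selects column `i` of the row. A small sketch of that selection rule (the helper is illustrative, not the cursor's actual decoding path):

```rust
/// Returns the column indices a mask selects, for a row with `num_columns`.
fn selected_columns(mask: usize, num_columns: usize) -> Vec<usize> {
    (0..num_columns).filter(|col| mask & (1 << col) != 0).collect()
}

fn main() {
    const BLOCKS_FULL_MASK: usize = 0b11;
    const BLOCKS_BLOCK_MASK: usize = 0b01; // first column only
    const BLOCKS_WITHDRAWAL_MASK: usize = 0b10; // second column only

    assert_eq!(selected_columns(BLOCKS_FULL_MASK, 2), vec![0, 1]);
    assert_eq!(selected_columns(BLOCKS_BLOCK_MASK, 2), vec![0]);
    assert_eq!(selected_columns(BLOCKS_WITHDRAWAL_MASK, 2), vec![1]);
    assert!(selected_columns(0b00, 2).is_empty());
}
```
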
- let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); writer.append_column(Some(Ok(&col1[2]))).unwrap(); writer.append_column(Some(Ok(&col2[2]))).unwrap(); @@ -1220,7 +934,7 @@ mod tests { // Writer will execute a consistency check and verify that the data file has more data than // it should, and resets it to the last offset of the list (on disk here) - let writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let writer = NippyJarWriter::new(nippy).unwrap(); assert_eq!(initial_rows, writer.rows()); assert_eq!( initial_data_size, @@ -1236,7 +950,7 @@ mod tests { assert_eq!(nippy.max_row_size, 0); assert_eq!(nippy.rows, 0); - let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); assert_eq!(writer.column(), 0); writer.append_column(Some(Ok(&col1[0]))).unwrap(); @@ -1274,7 +988,7 @@ mod tests { assert_eq!(nippy.max_row_size, col1[0].len() + col2[0].len()); assert_eq!(nippy.rows, 1); - let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); assert_eq!(writer.column(), 0); writer.append_column(Some(Ok(&col1[1]))).unwrap(); @@ -1305,7 +1019,7 @@ mod tests { fn prune_rows(num_columns: usize, file_path: &Path, col1: &[Vec], col2: &[Vec]) { let nippy = NippyJar::load_without_header(file_path).unwrap(); - let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); // Appends a third row, so we have an offset list in memory, which is not flushed to disk writer.append_column(Some(Ok(&col1[2]))).unwrap(); @@ -1336,7 +1050,7 @@ mod tests { } // This should prune from the ondisk offset list and clear the jar. - let mut writer = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let mut writer = NippyJarWriter::new(nippy).unwrap(); writer.prune_rows(1).unwrap(); assert!(writer.is_dirty()); @@ -1377,6 +1091,6 @@ mod tests { data_file.set_len(data_len - 32 * missing_offsets).unwrap(); // runs the consistency check. - let _ = NippyJarWriter::new(nippy, ConsistencyFailStrategy::Heal).unwrap(); + let _ = NippyJarWriter::new(nippy).unwrap(); } } diff --git a/crates/storage/nippy-jar/src/phf/fmph.rs b/crates/storage/nippy-jar/src/phf/fmph.rs deleted file mode 100644 index a332c40cf7..0000000000 --- a/crates/storage/nippy-jar/src/phf/fmph.rs +++ /dev/null @@ -1,99 +0,0 @@ -use crate::{NippyJarError, PHFKey, PerfectHashingFunction}; -use ph::fmph::{BuildConf, Function}; -use serde::{ - de::Error as DeSerdeError, ser::Error as SerdeError, Deserialize, Deserializer, Serialize, - Serializer, -}; - -/// Wrapper struct for [`Function`]. Implementation of the following [paper](https://dl.acm.org/doi/10.1145/3596453). 
-#[derive(Default)] -pub struct Fmph { - function: Option, -} - -impl Fmph { - pub const fn new() -> Self { - Self { function: None } - } -} - -impl PerfectHashingFunction for Fmph { - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { - self.function = Some(Function::from_slice_with_conf( - keys, - BuildConf { use_multiple_threads: true, ..Default::default() }, - )); - Ok(()) - } - - fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { - if let Some(f) = &self.function { - return Ok(f.get(key)) - } - Err(NippyJarError::PHFMissingKeys) - } -} - -#[cfg(test)] -impl PartialEq for Fmph { - fn eq(&self, _other: &Self) -> bool { - match (&self.function, &_other.function) { - (Some(func1), Some(func2)) => { - func1.level_sizes() == func2.level_sizes() && - func1.write_bytes() == func2.write_bytes() && - { - let mut f1 = Vec::with_capacity(func1.write_bytes()); - func1.write(&mut f1).expect("enough capacity"); - - let mut f2 = Vec::with_capacity(func2.write_bytes()); - func2.write(&mut f2).expect("enough capacity"); - - f1 == f2 - } - } - (None, None) => true, - _ => false, - } - } -} - -impl std::fmt::Debug for Fmph { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Fmph") - .field("bytes_size", &self.function.as_ref().map(|f| f.write_bytes())) - .finish_non_exhaustive() - } -} - -impl Serialize for Fmph { - /// Potentially expensive, but should be used only when creating the file. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match &self.function { - Some(f) => { - let mut v = Vec::with_capacity(f.write_bytes()); - f.write(&mut v).map_err(S::Error::custom)?; - serializer.serialize_some(&v) - } - None => serializer.serialize_none(), - } - } -} - -impl<'de> Deserialize<'de> for Fmph { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - if let Some(buffer) = >>::deserialize(deserializer)? { - return Ok(Self { - function: Some( - Function::read(&mut std::io::Cursor::new(buffer)).map_err(D::Error::custom)?, - ), - }) - } - Ok(Self { function: None }) - } -} diff --git a/crates/storage/nippy-jar/src/phf/go_fmph.rs b/crates/storage/nippy-jar/src/phf/go_fmph.rs deleted file mode 100644 index 328ddcb4dd..0000000000 --- a/crates/storage/nippy-jar/src/phf/go_fmph.rs +++ /dev/null @@ -1,100 +0,0 @@ -use crate::{NippyJarError, PHFKey, PerfectHashingFunction}; -use ph::fmph::{GOBuildConf, GOFunction}; -use serde::{ - de::Error as DeSerdeError, ser::Error as SerdeError, Deserialize, Deserializer, Serialize, - Serializer, -}; - -/// Wrapper struct for [`GOFunction`]. Implementation of the following [paper](https://dl.acm.org/doi/10.1145/3596453). 
-#[derive(Default)] -pub struct GoFmph { - function: Option, -} - -impl GoFmph { - pub const fn new() -> Self { - Self { function: None } - } -} - -impl PerfectHashingFunction for GoFmph { - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { - self.function = Some(GOFunction::from_slice_with_conf( - keys, - GOBuildConf { use_multiple_threads: true, ..Default::default() }, - )); - Ok(()) - } - - fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { - if let Some(f) = &self.function { - return Ok(f.get(key)) - } - Err(NippyJarError::PHFMissingKeys) - } -} - -#[cfg(test)] -impl PartialEq for GoFmph { - fn eq(&self, other: &Self) -> bool { - match (&self.function, &other.function) { - (Some(func1), Some(func2)) => { - func1.level_sizes() == func2.level_sizes() && - func1.write_bytes() == func2.write_bytes() && - { - let mut f1 = Vec::with_capacity(func1.write_bytes()); - func1.write(&mut f1).expect("enough capacity"); - - let mut f2 = Vec::with_capacity(func2.write_bytes()); - func2.write(&mut f2).expect("enough capacity"); - - f1 == f2 - } - } - (None, None) => true, - _ => false, - } - } -} - -impl std::fmt::Debug for GoFmph { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("GoFmph") - .field("bytes_size", &self.function.as_ref().map(|f| f.write_bytes())) - .finish_non_exhaustive() - } -} - -impl Serialize for GoFmph { - /// Potentially expensive, but should be used only when creating the file. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match &self.function { - Some(f) => { - let mut v = Vec::with_capacity(f.write_bytes()); - f.write(&mut v).map_err(S::Error::custom)?; - serializer.serialize_some(&v) - } - None => serializer.serialize_none(), - } - } -} - -impl<'de> Deserialize<'de> for GoFmph { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - if let Some(buffer) = >>::deserialize(deserializer)? { - return Ok(Self { - function: Some( - GOFunction::read(&mut std::io::Cursor::new(buffer)) - .map_err(D::Error::custom)?, - ), - }) - } - Ok(Self { function: None }) - } -} diff --git a/crates/storage/nippy-jar/src/phf/mod.rs b/crates/storage/nippy-jar/src/phf/mod.rs deleted file mode 100644 index ade48b60a3..0000000000 --- a/crates/storage/nippy-jar/src/phf/mod.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::NippyJarError; -use serde::{Deserialize, Serialize}; -use std::hash::Hash; - -mod fmph; -pub use fmph::Fmph; - -mod go_fmph; -pub use go_fmph::GoFmph; - -/// Trait alias for [`PerfectHashingFunction`] keys. -pub trait PHFKey: AsRef<[u8]> + Sync + Clone + Hash {} -impl + Sync + Clone + Hash> PHFKey for T {} - -/// Trait to build and query a perfect hashing function. -pub trait PerfectHashingFunction: Serialize + for<'a> Deserialize<'a> { - /// Adds the key set and builds the perfect hashing function. - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError>; - - /// Get corresponding associated integer. There might be false positives. - fn get_index(&self, key: &[u8]) -> Result, NippyJarError>; -} - -/// Enumerates all types of perfect hashing functions. 
-#[derive(Debug, Serialize, Deserialize)]
-#[cfg_attr(test, derive(PartialEq))]
-pub enum Functions {
-    Fmph(Fmph),
-    GoFmph(GoFmph),
-}
-
-impl PerfectHashingFunction for Functions {
-    fn set_keys<T: PHFKey>(&mut self, keys: &[T]) -> Result<(), NippyJarError> {
-        match self {
-            Self::Fmph(f) => f.set_keys(keys),
-            Self::GoFmph(f) => f.set_keys(keys),
-        }
-    }
-
-    fn get_index(&self, key: &[u8]) -> Result<Option<u64>, NippyJarError> {
-        match self {
-            Self::Fmph(f) => f.get_index(key),
-            Self::GoFmph(f) => f.get_index(key),
-        }
-    }
-}
diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs
index 695fd6642e..34272a9abd 100644
--- a/crates/storage/nippy-jar/src/writer.rs
+++ b/crates/storage/nippy-jar/src/writer.rs
@@ -1,13 +1,15 @@
-use crate::{compression::Compression, ColumnResult, NippyJar, NippyJarError, NippyJarHeader};
+use crate::{
+    compression::Compression, ColumnResult, NippyJar, NippyJarChecker, NippyJarError,
+    NippyJarHeader,
+};
 use std::{
-    cmp::Ordering,
     fs::{File, OpenOptions},
     io::{BufWriter, Read, Seek, SeekFrom, Write},
     path::Path,
 };
 
 /// Size of one offset in bytes.
-const OFFSET_SIZE_BYTES: u8 = 8;
+pub(crate) const OFFSET_SIZE_BYTES: u8 = 8;
 
 /// Writer of [`NippyJar`]. Handles table data and offsets only.
 ///
@@ -46,22 +48,32 @@ pub struct NippyJarWriter<H: NippyJarHeader> {
 impl<H: NippyJarHeader> NippyJarWriter<H> {
     /// Creates a [`NippyJarWriter`] from [`NippyJar`].
     ///
-    /// If `read_only` is set to `true`, any inconsistency issue won't be healed, and will return
-    /// [`NippyJarError::InconsistentState`] instead.
-    pub fn new(
-        jar: NippyJar<H>,
-        check_mode: ConsistencyFailStrategy,
-    ) -> Result<Self, NippyJarError> {
+    /// It will **always** attempt to heal any inconsistent state when called.
+    pub fn new(jar: NippyJar<H>) -> Result<Self, NippyJarError> {
         let (data_file, offsets_file, is_created) =
             Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?;
 
-        // Makes sure we don't have dangling data and offset files
-        jar.freeze_config()?;
+        let (jar, data_file, offsets_file) = if is_created {
+            // Makes sure we don't have dangling data and offset files when we just created the file
+            jar.freeze_config()?;
+
+            (jar, BufWriter::new(data_file), BufWriter::new(offsets_file))
+        } else {
+            // If we are opening a previously created jar, we need to check its consistency, and
+            // make changes if necessary.
+            let mut checker = NippyJarChecker::new(jar);
+            checker.ensure_consistency()?;
+
+            let NippyJarChecker { jar, data_file, offsets_file } = checker;
+
+            // Calling `ensure_consistency` fills `data_file` and `offsets_file`
+            (jar, data_file.expect("qed"), offsets_file.expect("qed"))
+        };
 
         let mut writer = Self {
             jar,
-            data_file: BufWriter::new(data_file),
-            offsets_file: BufWriter::new(offsets_file),
+            data_file,
+            offsets_file,
             tmp_buf: Vec::with_capacity(1_000_000),
             uncompressed_row_size: 0,
             offsets: Vec::with_capacity(1_000_000),
@@ -69,13 +81,9 @@ impl<H: NippyJarHeader> NippyJarWriter<H> {
             dirty: false,
         };
 
-        // If we are opening a previously created jar, we need to check its consistency, and make
-        // changes if necessary.
         if !is_created {
-            writer.ensure_file_consistency(check_mode)?;
-            if check_mode.should_heal() {
-                writer.commit()?;
-            }
+            // Commit any potential heals done above.
+            writer.commit()?;
         }
 
         Ok(writer)
@@ -147,107 +155,6 @@ impl<H: NippyJarHeader> NippyJarWriter<H> {
         Ok((data_file, offsets_file, is_created))
     }
 
-    /// Performs consistency checks on the [`NippyJar`] file and might self-heal or throw an error
-    /// according to [`ConsistencyFailStrategy`].
-    /// * Is the offsets file size expected?
-    /// * Is the data file size expected?
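
With the strategy parameter gone, re-opening a jar for writing always runs the checker and commits any heals before the writer is returned. A hedged sketch of a call site, assuming the patched `reth_nippy_jar` API (the wrapper function is hypothetical); a read-only verification without healing remains available through `NippyJarChecker::check_consistency`:

```rust
use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter};
use std::path::Path;

/// Re-opens an existing jar for appending. Any partially committed append or
/// prune left behind by a crash is healed (and committed) inside `new`.
fn reopen_for_append(path: &Path) -> Result<NippyJarWriter<()>, NippyJarError> {
    let jar = NippyJar::load_without_header(path)?;
    NippyJarWriter::new(jar)
}
```
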
- /// - /// This is based on the assumption that [`NippyJar`] configuration is **always** the last one - /// to be updated when something is written, as by the `commit()` function shows. - pub fn ensure_file_consistency( - &mut self, - check_mode: ConsistencyFailStrategy, - ) -> Result<(), NippyJarError> { - let reader = self.jar.open_data_reader()?; - - // When an offset size is smaller than the initial (8), we are dealing with immutable - // data. - if reader.offset_size() != OFFSET_SIZE_BYTES { - return Err(NippyJarError::FrozenJar) - } - - let expected_offsets_file_size: u64 = (1 + // first byte is the size of one offset - OFFSET_SIZE_BYTES as usize* self.jar.rows * self.jar.columns + // `offset size * num rows * num columns` - OFFSET_SIZE_BYTES as usize) as u64; // expected size of the data file - let actual_offsets_file_size = self.offsets_file.get_ref().metadata()?.len(); - - if check_mode.should_err() && - expected_offsets_file_size.cmp(&actual_offsets_file_size) != Ordering::Equal - { - return Err(NippyJarError::InconsistentState) - } - - // Offsets configuration wasn't properly committed - match expected_offsets_file_size.cmp(&actual_offsets_file_size) { - Ordering::Less => { - // Happened during an appending job - // TODO: ideally we could truncate until the last offset of the last column of the - // last row inserted - self.offsets_file.get_mut().set_len(expected_offsets_file_size)?; - } - Ordering::Greater => { - // Happened during a pruning job - // `num rows = (file size - 1 - size of one offset) / num columns` - self.jar.rows = ((actual_offsets_file_size. - saturating_sub(1). // first byte is the size of one offset - saturating_sub(OFFSET_SIZE_BYTES as u64) / // expected size of the data file - (self.jar.columns as u64)) / - OFFSET_SIZE_BYTES as u64) as usize; - - // Freeze row count changed - self.jar.freeze_config()?; - } - Ordering::Equal => {} - } - - // last offset should match the data_file_len - let last_offset = reader.reverse_offset(0)?; - let data_file_len = self.data_file.get_ref().metadata()?.len(); - - if check_mode.should_err() && last_offset.cmp(&data_file_len) != Ordering::Equal { - return Err(NippyJarError::InconsistentState) - } - - // Offset list wasn't properly committed - match last_offset.cmp(&data_file_len) { - Ordering::Less => { - // Happened during an appending job, so we need to truncate the data, since there's - // no way to recover it. - self.data_file.get_mut().set_len(last_offset)?; - } - Ordering::Greater => { - // Happened during a pruning job, so we need to reverse iterate offsets until we - // find the matching one. - for index in 0..reader.offsets_count()? { - let offset = reader.reverse_offset(index + 1)?; - // It would only be equal if the previous row was fully pruned. - if offset <= data_file_len { - let new_len = self - .offsets_file - .get_ref() - .metadata()? - .len() - .saturating_sub(OFFSET_SIZE_BYTES as u64 * (index as u64 + 1)); - self.offsets_file.get_mut().set_len(new_len)?; - - drop(reader); - - // Since we decrease the offset list, we need to check the consistency of - // `self.jar.rows` again - self.ensure_file_consistency(ConsistencyFailStrategy::Heal)?; - break - } - } - } - Ordering::Equal => {} - } - - self.offsets_file.seek(SeekFrom::End(0))?; - self.data_file.seek(SeekFrom::End(0))?; - - Ok(()) - } - /// Appends rows to data file. `fn commit()` should be called to flush offsets and config to /// disk. 
/// @@ -540,24 +447,3 @@ impl NippyJarWriter { &self.jar } } - -/// Strategy on encountering an inconsistent state when creating a [`NippyJarWriter`]. -#[derive(Debug, Copy, Clone)] -pub enum ConsistencyFailStrategy { - /// Writer should heal. - Heal, - /// Writer should throw an error. - ThrowError, -} - -impl ConsistencyFailStrategy { - /// Whether writer should heal. - const fn should_heal(&self) -> bool { - matches!(self, Self::Heal) - } - - /// Whether writer should throw an error. - const fn should_err(&self) -> bool { - matches!(self, Self::ThrowError) - } -} diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 1393df5367..533cd83064 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -32,8 +32,10 @@ reth-nippy-jar.workspace = true reth-codecs.workspace = true reth-evm.workspace = true reth-chain-state.workspace = true +reth-node-types.workspace = true # ethereum +alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true revm.workspace = true @@ -50,12 +52,14 @@ metrics.workspace = true # misc auto_impl.workspace = true itertools.workspace = true +notify = { workspace = true, default-features = false, features = ["macos_fsevent"] } parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true # test-utils once_cell = { workspace = true, optional = true } +reth-ethereum-engine-primitives = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -82,4 +86,5 @@ test-utils = [ "reth-trie/test-utils", "reth-chain-state/test-utils", "once_cell", + "reth-ethereum-engine-primitives", ] diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 37d44cde51..09b892562f 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,4 +1,4 @@ -use reth_primitives::{B256, U256}; +use alloy_primitives::{B256, U256}; use revm::db::states::RevertToSlot; use std::iter::Peekable; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 613ca18065..b363f3b06e 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -4,25 +4,26 @@ use crate::{ CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, FinalizedBlockReader, HeaderProvider, ParliaSnapshotReader, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, + RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::{ChainInfo, ChainSpec}; -use reth_db_api::{ - database::Database, - models::{AccountBeforeTx, StoredBlockBodyIndices}, -}; +use reth_chainspec::ChainInfo; +use reth_db::Database; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use 
reth_evm::ConfigureEvmEnv; +use reth_execution_types::ExecutionOutcome; +use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, - BlockNumHash, BlockNumber, BlockNumberOrTag, BlockWithSenders, EthereumHardforks, Header, - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + parlia::Snapshot, Account, BlobSidecars, Block, BlockHashOrNumber, BlockId, BlockNumHash, + BlockNumberOrTag, BlockWithSenders, EthereumHardforks, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -36,21 +37,23 @@ use std::{ }; use tracing::trace; +use super::{DatabaseProvider, ProviderNodeTypes}; + /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] -pub struct BlockchainProvider2 { +pub struct BlockchainProvider2 { /// Provider type used to access the database. - database: ProviderFactory, + database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. pub(super) canonical_in_memory_state: CanonicalInMemoryState, } -impl Clone for BlockchainProvider2 { +impl Clone for BlockchainProvider2 { fn clone(&self) -> Self { Self { database: self.database.clone(), @@ -59,13 +62,10 @@ impl Clone for BlockchainProvider2 { } } -impl BlockchainProvider2 -where - DB: Database, -{ +impl BlockchainProvider2 { /// Create a new provider using only the database, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { + pub fn new(database: ProviderFactory) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; match provider.header_by_number(best.best_number)? { @@ -82,10 +82,7 @@ where /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. - pub fn with_latest( - database: ProviderFactory, - latest: SealedHeader, - ) -> ProviderResult { + pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> ProviderResult { let provider = database.provider()?; let finalized_header = provider .last_finalized_block_number()? @@ -191,7 +188,7 @@ where /// 2. The in-block transaction index. 
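
The pattern repeated through the rest of this file: `BlockchainProvider2` is no longer generic over a raw `Database` but over a node-types bundle whose associated `DB` supplies the transaction types, and `DatabaseProviderFactory` now exposes its read-only and read-write providers as associated types. A compressed sketch of that shape with simplified stand-in traits (these are not reth's real definitions):

```rust
use std::marker::PhantomData;

trait Database {
    type TX;
    type TXMut;
}

// Stand-in for `NodeTypesWithDB` / `ProviderNodeTypes`.
trait NodeTypesWithDB {
    type DB: Database;
}

// Stand-in for the concrete provider wrapping a transaction.
struct DatabaseProvider<Tx>(Tx);

trait DatabaseProviderFactory {
    type DB: Database;
    // Read-only and read-write providers are associated types now, instead of
    // a hard-coded `DatabaseProviderRO<DB>` return type.
    type Provider;
    type ProviderRW;

    fn database_provider_ro(&self) -> Self::Provider;
    fn database_provider_rw(&self) -> Self::ProviderRW;
}

struct BlockchainProvider2<N: NodeTypesWithDB> {
    _node_types: PhantomData<N>,
}

impl<N: NodeTypesWithDB> DatabaseProviderFactory for BlockchainProvider2<N> {
    type DB = N::DB;
    type Provider = DatabaseProvider<<N::DB as Database>::TX>;
    type ProviderRW = DatabaseProvider<<N::DB as Database>::TXMut>;

    fn database_provider_ro(&self) -> Self::Provider {
        unimplemented!("sketch only")
    }

    fn database_provider_rw(&self) -> Self::ProviderRW {
        unimplemented!("sketch only")
    }
}

fn main() {}
```
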
fn block_state_by_tx_id( &self, - provider: &DatabaseProviderRO, + provider: &DatabaseProviderRO, id: TxNumber, ) -> ProviderResult>, usize)>> { // Get the last block number stored in the database @@ -247,10 +244,7 @@ where } } -impl BlockchainProvider2 -where - DB: Database, -{ +impl BlockchainProvider2 { /// Ensures that the given block number is canonical (synced) /// /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are @@ -270,25 +264,27 @@ where } } -impl DatabaseProviderFactory for BlockchainProvider2 -where - DB: Database, -{ - fn database_provider_ro(&self) -> ProviderResult> { - self.database.provider() +impl DatabaseProviderFactory for BlockchainProvider2 { + type DB = N::DB; + type Provider = DatabaseProvider<::TX>; + type ProviderRW = DatabaseProvider<::TXMut>; + + fn database_provider_ro(&self) -> ProviderResult { + self.database.database_provider_ro() + } + + fn database_provider_rw(&self) -> ProviderResult { + self.database.database_provider_rw() } } -impl StaticFileProviderFactory for BlockchainProvider2 { +impl StaticFileProviderFactory for BlockchainProvider2 { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } -impl HeaderProvider for BlockchainProvider2 -where - DB: Database, -{ +impl HeaderProvider for BlockchainProvider2 { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(block_state) = self.canonical_in_memory_state.state_by_hash(*block_hash) { return Ok(Some(block_state.block().block().header.header().clone())); @@ -388,10 +384,7 @@ where } } -impl BlockHashReader for BlockchainProvider2 -where - DB: Database, -{ +impl BlockHashReader for BlockchainProvider2 { fn block_hash(&self, number: u64) -> ProviderResult> { if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { return Ok(Some(block_state.hash())); @@ -418,10 +411,7 @@ where } } -impl BlockNumReader for BlockchainProvider2 -where - DB: Database, -{ +impl BlockNumReader for BlockchainProvider2 { fn chain_info(&self) -> ProviderResult { Ok(self.canonical_in_memory_state.chain_info()) } @@ -443,10 +433,7 @@ where } } -impl BlockIdReader for BlockchainProvider2 -where - DB: Database, -{ +impl BlockIdReader for BlockchainProvider2 { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.canonical_in_memory_state.pending_block_num_hash()) } @@ -460,10 +447,7 @@ where } } -impl BlockReader for BlockchainProvider2 -where - DB: Database, -{ +impl BlockReader for BlockchainProvider2 { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { match source { BlockSource::Any | BlockSource::Canonical => { @@ -662,10 +646,7 @@ where } } -impl TransactionsProvider for BlockchainProvider2 -where - DB: Database, -{ +impl TransactionsProvider for BlockchainProvider2 { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { // First, check the database if let Some(id) = self.database.transaction_id(tx_hash)? { @@ -855,10 +836,7 @@ where } } -impl ReceiptProvider for BlockchainProvider2 -where - DB: Database, -{ +impl ReceiptProvider for BlockchainProvider2 { fn receipt(&self, id: TxNumber) -> ProviderResult> { let provider = self.database.provider()?; let Some((block_state, tx_index)) = self.block_state_by_tx_id(&provider, id)? 
else { @@ -920,10 +898,7 @@ where } } -impl ReceiptProviderIdExt for BlockchainProvider2 -where - DB: Database, -{ +impl ReceiptProviderIdExt for BlockchainProvider2 { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { @@ -954,10 +929,7 @@ where } } -impl WithdrawalsProvider for BlockchainProvider2 -where - DB: Database, -{ +impl WithdrawalsProvider for BlockchainProvider2 { fn withdrawals_by_block( &self, id: BlockHashOrNumber, @@ -989,10 +961,7 @@ where } } -impl SidecarsProvider for BlockchainProvider2 -where - DB: Database, -{ +impl SidecarsProvider for BlockchainProvider2 { fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.sidecars(block_hash) } @@ -1002,10 +971,7 @@ where } } -impl RequestsProvider for BlockchainProvider2 -where - DB: Database, -{ +impl RequestsProvider for BlockchainProvider2 { fn requests_by_block( &self, id: BlockHashOrNumber, @@ -1023,10 +989,7 @@ where } } -impl StageCheckpointReader for BlockchainProvider2 -where - DB: Database, -{ +impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) } @@ -1040,10 +1003,7 @@ where } } -impl EvmEnvProvider for BlockchainProvider2 -where - DB: Database, -{ +impl EvmEnvProvider for BlockchainProvider2 { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -1052,7 +1012,7 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1067,18 +1027,12 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_and_block_env( - cfg, - block_env, - &self.database.chain_spec(), - header, - total_difficulty, - ); + evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -1089,7 +1043,7 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1103,20 +1057,17 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, &self.database.chain_spec(), header, total_difficulty); + evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } } -impl PruneCheckpointReader for BlockchainProvider2 -where - DB: Database, -{ +impl PruneCheckpointReader for BlockchainProvider2 { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -1129,21 +1080,15 @@ where } } -impl ChainSpecProvider for BlockchainProvider2 -where - DB: Send + Sync, -{ - type ChainSpec = ChainSpec; +impl ChainSpecProvider for BlockchainProvider2 { + type ChainSpec = N::ChainSpec; - fn chain_spec(&self) -> Arc { + fn chain_spec(&self) -> Arc { self.database.chain_spec() } } -impl StateProviderFactory for BlockchainProvider2 -where - DB: Database, -{ +impl StateProviderFactory for BlockchainProvider2 { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -1260,9 +1205,8 @@ where } } -impl CanonChainTracker for BlockchainProvider2 +impl CanonChainTracker for BlockchainProvider2 where - DB: Send + Sync, Self: BlockReader, { fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -1295,7 +1239,7 @@ where } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider2 where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, { @@ -1383,19 +1327,13 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 -where - DB: Send + Sync, -{ +impl CanonStateSubscriptions for BlockchainProvider2 { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 -where - DB: Send + Sync, -{ +impl ForkChoiceSubscriptions for BlockchainProvider2 { fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) @@ -1407,10 +1345,7 @@ where } } -impl ChangeSetReader for BlockchainProvider2 -where - DB: Database, -{ +impl ChangeSetReader for BlockchainProvider2 { fn account_block_changeset( &self, block_number: BlockNumber, @@ -1435,10 +1370,7 @@ where } } -impl AccountReader for BlockchainProvider2 -where - DB: Database + Sync + Send, -{ +impl AccountReader for BlockchainProvider2 { /// Get basic account information. 
fn basic_account(&self, address: Address) -> ProviderResult> { // use latest state provider @@ -1447,15 +1379,23 @@ where } } -impl ParliaSnapshotReader for BlockchainProvider2 -where - DB: Database + Sync + Send, -{ +impl ParliaSnapshotReader for BlockchainProvider2 { fn get_parlia_snapshot(&self, block_hash: B256) -> ProviderResult> { self.database.provider()?.get_parlia_snapshot(block_hash) } } +impl StateReader for BlockchainProvider2 { + fn get_state(&self, block: BlockNumber) -> ProviderResult> { + if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { + let state = state.block().execution_outcome().clone(); + Ok(Some(state)) + } else { + self.database.provider()?.get_state(block..=block) + } + } +} + #[cfg(test)] mod tests { use std::{ @@ -1466,9 +1406,14 @@ mod tests { use crate::{ providers::BlockchainProvider2, - test_utils::{create_test_provider_factory, create_test_provider_factory_with_chain_spec}, - BlockWriter, CanonChainTracker, StaticFileWriter, + test_utils::{ + create_test_provider_factory, create_test_provider_factory_with_chain_spec, + MockNodeTypesWithDB, + }, + writer::UnifiedStorageWriter, + BlockWriter, CanonChainTracker, StaticFileProviderFactory, StaticFileWriter, }; + use alloy_primitives::B256; use itertools::Itertools; use rand::Rng; use reth_chain_state::{ @@ -1478,21 +1423,17 @@ mod tests { use reth_chainspec::{ ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, }; - use reth_db::{ - models::{AccountBeforeTx, StoredBlockBodyIndices}, - test_utils::TempDatabase, - DatabaseEnv, - }; + use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ BlockHashOrNumber, BlockNumHash, BlockNumberOrTag, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, StaticFileSegment, TransactionMeta, TransactionSignedNoHash, - Withdrawals, B256, + Withdrawals, }; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, - ChangeSetReader, HeaderProvider, ReceiptProviderIdExt, RequestsProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ChangeSetReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, + StateProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, @@ -1546,7 +1487,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2>>, + BlockchainProvider2, Vec, Vec, Vec>, @@ -1570,21 +1511,27 @@ mod tests { let factory = create_test_provider_factory_with_chain_spec(chain_spec); let provider_rw = factory.provider_rw()?; - // Insert blocks and receipts into the database + // Insert blocks into the database for block in &database_blocks { provider_rw.insert_historical_block( block.clone().seal_with_senders().expect("failed to seal block with senders"), )?; - - // Insert the receipts into the database using the writer from the provider_rw - let mut writer = - provider_rw.static_file_provider().latest_writer(StaticFileSegment::Receipts)?; - let block_number = block.number as usize; - for receipt in receipts.get(block_number).unwrap() { - writer.append_receipt(block.number, receipt)?; - } } - provider_rw.commit()?; + + // Insert receipts into the static files + UnifiedStorageWriter::new( + &provider_rw, + 
Some(factory.static_file_provider().latest_writer(StaticFileSegment::Receipts)?), + ) + .append_receipts_from_blocks( + // The initial block number is required + database_blocks.first().map(|b| b.number).unwrap_or_default(), + receipts.iter().map(|vec| vec.clone().into_iter().map(Some).collect::>()), + Vec::new(), + )?; + + // Commit to both storages: database and static files + UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; let provider = BlockchainProvider2::new(factory)?; @@ -1632,7 +1579,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2>>, + BlockchainProvider2, Vec, Vec, Vec>, @@ -3065,6 +3012,37 @@ mod tests { Ok(()) } + #[test] + fn test_receipt_provider() -> eyre::Result<()> { + let mut rng = generators::rng(); + let (provider, database_blocks, in_memory_blocks, receipts) = provider_with_random_blocks( + &mut rng, + TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT, + BlockRangeParams { tx_count: 1..3, ..Default::default() }, + )?; + + let blocks = [database_blocks, in_memory_blocks].concat(); + + for block in blocks { + let block_number = block.number as usize; + for (txn_number, _) in block.body.iter().enumerate() { + let txn_hash = block.body.get(txn_number).unwrap().hash(); + let txn_id = provider.transaction_id(txn_hash)?.unwrap(); + assert_eq!( + provider.receipt(txn_id)?.unwrap(), + receipts.get(block_number).unwrap().clone().get(txn_number).unwrap().clone() + ); + assert_eq!( + provider.receipt_by_hash(txn_hash)?.unwrap(), + receipts.get(block_number).unwrap().clone().get(txn_number).unwrap().clone() + ); + } + } + + Ok(()) + } + #[test] fn test_receipt_provider_id_ext_receipts_by_block_id() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -3072,7 +3050,7 @@ mod tests { &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, - BlockRangeParams::default(), + BlockRangeParams { tx_count: 1..3, ..Default::default() }, )?; let database_block = database_blocks.first().unwrap().clone(); @@ -3081,12 +3059,18 @@ mod tests { let block_number = database_block.number; let block_hash = database_block.header.hash(); + assert!(!receipts.get(database_block.number as usize).unwrap().is_empty()); + assert!(!provider + .receipts_by_number_or_tag(database_block.number.into())? 
+ .unwrap() + .is_empty()); + assert_eq!( - provider.receipts_by_block_id(block_number.into())?.unwrap_or_default(), + provider.receipts_by_block_id(block_number.into())?.unwrap(), receipts.get(block_number as usize).unwrap().clone() ); assert_eq!( - provider.receipts_by_block_id(block_hash.into())?.unwrap_or_default(), + provider.receipts_by_block_id(block_hash.into())?.unwrap(), receipts.get(block_number as usize).unwrap().clone() ); @@ -3094,11 +3078,11 @@ mod tests { let block_hash = in_memory_block.header.hash(); assert_eq!( - provider.receipts_by_block_id(block_number.into())?.unwrap_or_default(), + provider.receipts_by_block_id(block_number.into())?.unwrap(), receipts.get(block_number as usize).unwrap().clone() ); assert_eq!( - provider.receipts_by_block_id(block_hash.into())?.unwrap_or_default(), + provider.receipts_by_block_id(block_hash.into())?.unwrap(), receipts.get(block_number as usize).unwrap().clone() ); @@ -3112,7 +3096,7 @@ mod tests { &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, - BlockRangeParams::default(), + BlockRangeParams { tx_count: 1..3, ..Default::default() }, )?; let database_block = database_blocks.first().unwrap().clone(); @@ -3122,20 +3106,26 @@ mod tests { let safe_block = in_memory_blocks.get(in_memory_block_count - 2).unwrap().clone(); let finalized_block = in_memory_blocks.get(in_memory_block_count - 3).unwrap().clone(); + assert!(!receipts.get(database_block.number as usize).unwrap().is_empty()); + assert!(!provider + .receipts_by_number_or_tag(database_block.number.into())? + .unwrap() + .is_empty()); + assert_eq!( - provider.receipts_by_number_or_tag(database_block.number.into())?.unwrap_or_default(), + provider.receipts_by_number_or_tag(database_block.number.into())?.unwrap(), receipts.get(database_block.number as usize).unwrap().clone() ); assert_eq!( - provider.receipts_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap_or_default(), + provider.receipts_by_number_or_tag(BlockNumberOrTag::Latest)?.unwrap(), receipts.get(canonical_block.number as usize).unwrap().clone() ); assert_eq!( - provider.receipts_by_number_or_tag(BlockNumberOrTag::Safe)?.unwrap_or_default(), + provider.receipts_by_number_or_tag(BlockNumberOrTag::Safe)?.unwrap(), receipts.get(safe_block.number as usize).unwrap().clone() ); assert_eq!( - provider.receipts_by_number_or_tag(BlockNumberOrTag::Finalized)?.unwrap_or_default(), + provider.receipts_by_number_or_tag(BlockNumberOrTag::Finalized)?.unwrap(), receipts.get(finalized_block.number as usize).unwrap().clone() ); @@ -3289,6 +3279,172 @@ mod tests { Ok(()) } + #[test] + fn test_state_provider_factory() -> eyre::Result<()> { + let mut rng = generators::rng(); + + // test in-memory state use-cases + let (in_memory_provider, _, in_memory_blocks, _) = provider_with_random_blocks( + &mut rng, + TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT, + BlockRangeParams::default(), + )?; + + // test database state use-cases + let (only_database_provider, database_blocks, _, _) = provider_with_random_blocks( + &mut rng, + TEST_BLOCKS_COUNT, + 0, + BlockRangeParams::default(), + )?; + + let blocks = [database_blocks.clone(), in_memory_blocks.clone()].concat(); + let first_in_memory_block = in_memory_blocks.first().unwrap(); + let first_db_block = database_blocks.first().unwrap(); + + // test latest state + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider.latest().unwrap().block_hash(first_in_memory_block.number)?.unwrap() + ); + // test latest falls back to database state when there's no in-memory block + assert_eq!( + 
first_db_block.hash(), + only_database_provider.latest().unwrap().block_hash(first_db_block.number)?.unwrap() + ); + + // test history by block number + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .history_by_block_number(first_in_memory_block.number)? + .block_hash(first_in_memory_block.number)? + .unwrap() + ); + assert_eq!( + first_db_block.hash(), + only_database_provider + .history_by_block_number(first_db_block.number)? + .block_hash(first_db_block.number)? + .unwrap() + ); + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .history_by_block_hash(first_in_memory_block.hash())? + .block_hash(first_in_memory_block.number)? + .unwrap() + ); + assert!(only_database_provider.history_by_block_hash(B256::random()).is_err()); + + // test state by block hash + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .state_by_block_hash(first_in_memory_block.hash())? + .block_hash(first_in_memory_block.number)? + .unwrap() + ); + assert_eq!( + first_db_block.hash(), + only_database_provider + .state_by_block_hash(first_db_block.hash())? + .block_hash(first_db_block.number)? + .unwrap() + ); + assert!(only_database_provider.state_by_block_hash(B256::random()).is_err()); + + // test pending without pending state- falls back to latest + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .pending() + .unwrap() + .block_hash(first_in_memory_block.number) + .unwrap() + .unwrap() + ); + + // adding a pending block to state can test pending() and pending_state_by_hash() function + let pending_block = database_blocks[database_blocks.len() - 1].clone(); + only_database_provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: Arc::new(pending_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + assert_eq!( + pending_block.hash(), + only_database_provider + .pending() + .unwrap() + .block_hash(pending_block.number) + .unwrap() + .unwrap() + ); + + assert_eq!( + pending_block.hash(), + only_database_provider + .pending_state_by_hash(pending_block.hash())? + .unwrap() + .block_hash(pending_block.number)? + .unwrap() + ); + + // test state by block number or tag + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Number( + first_in_memory_block.number + ))? + .block_hash(first_in_memory_block.number)? + .unwrap() + ); + assert_eq!( + first_in_memory_block.hash(), + in_memory_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Latest)? + .block_hash(first_in_memory_block.number)? + .unwrap() + ); + // test state by block tag for safe block + let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); + in_memory_provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); + assert_eq!( + safe_block.hash(), + in_memory_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Safe)? + .block_hash(safe_block.number)? + .unwrap() + ); + // test state by block tag for finalized block + let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); + in_memory_provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); + assert_eq!( + finalized_block.hash(), + in_memory_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Finalized)? + .block_hash(finalized_block.number)? 
+ .unwrap() + ); + // test state by block tag for earliest block + let earliest_block = blocks.first().unwrap().clone(); + assert_eq!( + earliest_block.hash(), + only_database_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Earliest)? + .block_hash(earliest_block.number)? + .unwrap() + ); + + Ok(()) + } + #[test] fn test_canon_state_tracker() -> eyre::Result<()> { let mut rng = generators::rng(); diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 50c88a51fc..50d3d46cef 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,14 +1,14 @@ use crate::{ AccountReader, BlockHashReader, ExecutionDataProvider, StateProvider, StateRootProvider, }; -use reth_primitives::{Account, Address, BlockNumber, Bytecode, Bytes, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, B256}; +use reth_primitives::{Account, Bytecode}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] /// or an underlying state provider. @@ -75,12 +75,7 @@ impl StateRootProvider self.state_provider.state_root(state) } - fn state_root_from_nodes( - &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { unimplemented!() } @@ -96,16 +91,11 @@ impl StateRootProvider fn state_root_from_nodes_with_updates( &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); - let mut state_prefix_sets = state.construct_prefix_sets(); - state.extend(hashed_state); - state_prefix_sets.extend(prefix_sets); - self.state_provider.state_root_from_nodes_with_updates(nodes, state, state_prefix_sets) + input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + self.state_provider.state_root_from_nodes_with_updates(input) } } @@ -137,25 +127,33 @@ impl StateProofProvider { fn proof( &self, - hashed_state: HashedPostState, + mut input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); - state.extend(hashed_state); - self.state_provider.proof(state, address, slots) + input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + self.state_provider.proof(input, address, slots) + } + + fn multiproof( + &self, + mut input: reth_trie::TrieInput, + targets: HashMap>, + ) -> ProviderResult { + let bundle_state = self.block_execution_data_provider.execution_outcome().state(); + input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + self.state_provider.multiproof(input, targets) } fn witness( &self, - overlay: 
HashedPostState, + mut input: TrieInput, target: HashedPostState, ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); - state.extend(overlay); - self.state_provider.witness(state, target) + input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + self.state_provider.witness(input, target) } } @@ -163,8 +161,8 @@ impl StateProvider for BundleStat fn storage( &self, account: Address, - storage_key: reth_primitives::StorageKey, - ) -> ProviderResult> { + storage_key: alloy_primitives::StorageKey, + ) -> ProviderResult> { let u256_storage_key = storage_key.into(); if let Some(value) = self .block_execution_data_provider diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index fe9b659411..4640f46033 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -1,8 +1,12 @@ -use crate::{BlockNumReader, DatabaseProviderFactory, DatabaseProviderRO, HeaderProvider}; -use reth_db_api::database::Database; -use reth_primitives::{GotExpected, B256}; +use crate::{BlockNumReader, DatabaseProviderFactory, HeaderProvider}; +use alloy_primitives::B256; +use reth_errors::ProviderError; +use reth_primitives::GotExpected; +use reth_storage_api::{BlockReader, DBProvider}; use reth_storage_errors::provider::ProviderResult; -use std::marker::PhantomData; + +use reth_trie::HashedPostState; +use reth_trie_db::DatabaseHashedPostState; pub use reth_storage_errors::provider::ConsistentViewError; @@ -22,34 +26,47 @@ pub use reth_storage_errors::provider::ConsistentViewError; /// appropriately. /// 2) be sure that the state does not change. #[derive(Clone, Debug)] -pub struct ConsistentDbView { - database: PhantomData, - provider: Provider, +pub struct ConsistentDbView { + factory: Factory, tip: Option, } -impl ConsistentDbView +impl ConsistentDbView where - DB: Database, - Provider: DatabaseProviderFactory, + Factory: DatabaseProviderFactory, { /// Creates new consistent database view. - pub const fn new(provider: Provider, tip: Option) -> Self { - Self { database: PhantomData, provider, tip } + pub const fn new(factory: Factory, tip: Option) -> Self { + Self { factory, tip } } /// Creates new consistent database view with latest tip. - pub fn new_with_latest_tip(provider: Provider) -> ProviderResult { + pub fn new_with_latest_tip(provider: Factory) -> ProviderResult { let provider_ro = provider.database_provider_ro()?; let last_num = provider_ro.last_block_number()?; let tip = provider_ro.sealed_header(last_num)?.map(|h| h.hash()); Ok(Self::new(provider, tip)) } + /// Retrieve revert hashed state down to the given block hash. + pub fn revert_state(&self, block_hash: B256) -> ProviderResult { + let provider = self.provider_ro()?; + let block_number = provider + .block_number(block_hash)? + .ok_or(ProviderError::BlockHashNotFound(block_hash))?; + if block_number == provider.best_block_number()? && + block_number == provider.last_block_number()? + { + Ok(HashedPostState::default()) + } else { + Ok(HashedPostState::from_reverts(provider.tx_ref(), block_number + 1)?) + } + } + /// Creates new read-only provider and performs consistency checks on the current tip. - pub fn provider_ro(&self) -> ProviderResult> { + pub fn provider_ro(&self) -> ProviderResult { // Create a new provider. 
- let provider_ro = self.provider.database_provider_ro()?; + let provider_ro = self.factory.database_provider_ro()?; // Check that the latest stored header number matches the number // that consistent view was initialized with. diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index f50bc5115f..c328899ad4 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -8,20 +8,22 @@ use crate::{ StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use reth_chainspec::{ChainInfo, ChainSpec, EthChainSpec}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use core::fmt; +use reth_chainspec::ChainInfo; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; +use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - parlia::Snapshot, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockNumber, - BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, - TxNumber, Withdrawal, Withdrawals, B256, U256, + parlia::Snapshot, BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Header, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::SidecarsProvider; +use reth_storage_api::{SidecarsProvider, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -35,31 +37,47 @@ use tracing::trace; mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; +use super::ProviderNodeTypes; + mod metrics; /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. -#[derive(Debug)] -pub struct ProviderFactory { +pub struct ProviderFactory { /// Database - db: Arc, + db: N::DB, /// Chain spec - chain_spec: Arc, + chain_spec: Arc, /// Static File Provider static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, } -impl ProviderFactory { +impl fmt::Debug for ProviderFactory +where + N: NodeTypesWithDB, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self { db, chain_spec, static_file_provider, prune_modes } = self; + f.debug_struct("ProviderFactory") + .field("db", &db) + .field("chain_spec", &chain_spec) + .field("static_file_provider", &static_file_provider) + .field("prune_modes", &prune_modes) + .finish() + } +} + +impl ProviderFactory { /// Create new database provider factory. pub fn new( - db: DB, - chain_spec: Arc, + db: N::DB, + chain_spec: Arc, static_file_provider: StaticFileProvider, ) -> Self { - Self { db: Arc::new(db), chain_spec, static_file_provider, prune_modes: PruneModes::none() } + Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } } /// Enables metrics on the static file provider. 
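
Note on the hunks above: they collapse `ProviderFactory`'s separate `DB` and `ChainSpec` type parameters into a single `N: NodeTypesWithDB` bundle, so each impl block names one generic. A minimal sketch of the pattern, with stand-in trait bodies (the `Database` and `NodeTypesWithDB` definitions below are simplified assumptions, not reth's real ones):

```rust
use std::{fmt, sync::Arc};

/// Stand-in for reth's `Database` trait.
pub trait Database: Clone + Send + Sync + 'static {}

/// Simplified stand-in for `reth_node_types::NodeTypesWithDB`: one trait
/// that names every type the storage layer is generic over.
pub trait NodeTypesWithDB: Send + Sync + 'static {
    type DB: Database;
    type ChainSpec: fmt::Debug + Send + Sync + 'static;
}

/// The factory takes a single `N` and pulls concrete types out of it via
/// associated types, instead of threading `DB` and `ChainSpec` everywhere.
pub struct ProviderFactory<N: NodeTypesWithDB> {
    db: N::DB,
    chain_spec: Arc<N::ChainSpec>,
}

// Manual impls, as in the diff: deriving `Clone`/`Debug` would add implicit
// `N: Clone` / `N: Debug` bounds, which the marker type `N` need not satisfy.
impl<N: NodeTypesWithDB> Clone for ProviderFactory<N> {
    fn clone(&self) -> Self {
        Self { db: self.db.clone(), chain_spec: self.chain_spec.clone() }
    }
}

impl<N: NodeTypesWithDB> fmt::Debug for ProviderFactory<N>
where
    N::DB: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProviderFactory")
            .field("db", &self.db)
            .field("chain_spec", &self.chain_spec)
            .finish()
    }
}
```
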
@@ -75,23 +93,23 @@ impl ProviderFactory { } /// Returns reference to the underlying database. - pub fn db_ref(&self) -> &DB { + pub const fn db_ref(&self) -> &N::DB { &self.db } #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB - pub fn into_db(self) -> Arc { + pub fn into_db(self) -> N::DB { self.db } } -impl ProviderFactory { +impl>> ProviderFactory { /// Create new database provider by passing a path. [`ProviderFactory`] will own the database /// instance. pub fn new_with_database_path>( path: P, - chain_spec: Arc, + chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, ) -> RethResult { @@ -104,7 +122,7 @@ impl ProviderFactory { } } -impl ProviderFactory { +impl ProviderFactory { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. @@ -112,7 +130,7 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. #[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), @@ -126,7 +144,7 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. #[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), @@ -147,7 +165,7 @@ impl ProviderFactory { &self, block_number: BlockNumber, ) -> ProviderResult { - let state_provider = self.provider()?.state_provider_by_block_number(block_number)?; + let state_provider = self.provider()?.try_into_history_at_block(block_number)?; trace!(target: "providers::db", ?block_number, "Returning historical state provider for block number"); Ok(state_provider) } @@ -160,26 +178,34 @@ impl ProviderFactory { .block_number(block_hash)? 
.ok_or(ProviderError::BlockHashNotFound(block_hash))?; - let state_provider = self.provider()?.state_provider_by_block_number(block_number)?; + let state_provider = self.provider()?.try_into_history_at_block(block_number)?; trace!(target: "providers::db", ?block_number, %block_hash, "Returning historical state provider for block hash"); Ok(state_provider) } } -impl DatabaseProviderFactory for ProviderFactory { - fn database_provider_ro(&self) -> ProviderResult> { +impl DatabaseProviderFactory for ProviderFactory { + type DB = N::DB; + type Provider = DatabaseProviderRO; + type ProviderRW = DatabaseProvider<::TXMut>; + + fn database_provider_ro(&self) -> ProviderResult { self.provider() } + + fn database_provider_rw(&self) -> ProviderResult { + self.provider_rw().map(|provider| provider.0) + } } -impl StaticFileProviderFactory for ProviderFactory { +impl StaticFileProviderFactory for ProviderFactory { /// Returns static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl HeaderSyncGapProvider for ProviderFactory { +impl HeaderSyncGapProvider for ProviderFactory { fn sync_gap( &self, tip: watch::Receiver, @@ -189,7 +215,7 @@ impl HeaderSyncGapProvider for ProviderFactory { } } -impl HeaderProvider for ProviderFactory { +impl HeaderProvider for ProviderFactory { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) } @@ -263,7 +289,7 @@ impl HeaderProvider for ProviderFactory { } } -impl BlockHashReader for ProviderFactory { +impl BlockHashReader for ProviderFactory { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -288,7 +314,7 @@ impl BlockHashReader for ProviderFactory { } } -impl BlockNumReader for ProviderFactory { +impl BlockNumReader for ProviderFactory { fn chain_info(&self) -> ProviderResult { self.provider()?.chain_info() } @@ -306,7 +332,7 @@ impl BlockNumReader for ProviderFactory { } } -impl BlockReader for ProviderFactory { +impl BlockReader for ProviderFactory { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } @@ -373,7 +399,7 @@ impl BlockReader for ProviderFactory { } } -impl TransactionsProvider for ProviderFactory { +impl TransactionsProvider for ProviderFactory { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } @@ -447,7 +473,7 @@ impl TransactionsProvider for ProviderFactory { } } -impl ReceiptProvider for ProviderFactory { +impl ReceiptProvider for ProviderFactory { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, @@ -479,7 +505,7 @@ impl ReceiptProvider for ProviderFactory { } } -impl WithdrawalsProvider for ProviderFactory { +impl WithdrawalsProvider for ProviderFactory { fn withdrawals_by_block( &self, id: BlockHashOrNumber, @@ -493,10 +519,7 @@ impl WithdrawalsProvider for ProviderFactory { } } -impl SidecarsProvider for ProviderFactory -where - DB: Database, -{ +impl SidecarsProvider for ProviderFactory { fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.sidecars(block_hash) } @@ -511,10 +534,7 @@ where } } -impl RequestsProvider for ProviderFactory -where - DB: Database, -{ +impl RequestsProvider for ProviderFactory { fn requests_by_block( &self, id: BlockHashOrNumber, @@ -524,7 +544,7 @@ where } } -impl 
<DB: Database> StageCheckpointReader for ProviderFactory<DB> { +impl<N: ProviderNodeTypes> StageCheckpointReader for ProviderFactory<N> { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult<Option<StageCheckpoint>> { self.provider()?.get_stage_checkpoint(id) } @@ -537,7 +557,7 @@ impl<DB: Database> StageCheckpointReader for ProviderFactory<DB> { } } -impl<DB: Database> EvmEnvProvider for ProviderFactory<DB> { +impl<N: ProviderNodeTypes> EvmEnvProvider for ProviderFactory<N> { fn fill_env_at<EvmConfig>( &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, at: BlockHashOrNumber, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { self.provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -559,7 +579,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -571,7 +591,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { self.provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -583,24 +603,21 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { self.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } -impl ChainSpecProvider for ProviderFactory -where - DB: Send + Sync, - ChainSpec: EthChainSpec, -{ - type ChainSpec = ChainSpec; - fn chain_spec(&self) -> Arc { +impl ChainSpecProvider for ProviderFactory { + type ChainSpec = N::ChainSpec; + + fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } -impl PruneCheckpointReader for ProviderFactory { +impl PruneCheckpointReader for ProviderFactory { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -613,10 +630,16 @@ impl PruneCheckpointReader for ProviderFactory { } } -impl Clone for ProviderFactory { +impl ParliaSnapshotReader for ProviderFactory { + fn get_parlia_snapshot(&self, block_hash: B256) -> ProviderResult> { + self.provider()?.get_parlia_snapshot(block_hash) + } +} + +impl Clone for ProviderFactory { fn clone(&self) -> Self { Self { - db: Arc::clone(&self.db), + db: self.db.clone(), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), @@ -624,20 +647,15 @@ impl Clone for ProviderFactory { } } -impl ParliaSnapshotReader for ProviderFactory { - fn get_parlia_snapshot(&self, block_hash: B256) -> ProviderResult> { - self.provider()?.get_parlia_snapshot(block_hash) - } -} - #[cfg(test)] mod tests { use super::*; use crate::{ providers::{StaticFileProvider, StaticFileWriter}, - test_utils::{blocks::TEST_BLOCK, create_test_provider_factory}, + test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, TransactionsProvider, }; + use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; use rand::Rng; use reth_chainspec::ChainSpecBuilder; @@ -646,7 +664,7 @@ mod tests { tables, test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; - use reth_primitives::{StaticFileSegment, TxNumber, B256, U256}; + use reth_primitives::StaticFileSegment; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; @@ -683,7 +701,7 @@ mod tests { fn provider_factory_with_database_path() { let chain_spec = ChainSpecBuilder::mainnet().build(); let (_static_dir, static_dir_path) = create_test_static_files_dir(); - let factory = ProviderFactory::new_with_database_path( + let factory = ProviderFactory::>::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), Arc::new(chain_spec), DatabaseArguments::new(Default::default()), diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e68a50b9c8..6e4b5edf91 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -7,45 +7,46 @@ use crate::{ }, writer::UnifiedStorageWriter, AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, BundleStateInit, Chain, EvmEnvProvider, FinalizedBlockReader, + BlockReader, BlockWriter, BundleStateInit, DBProvider, EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ParliaSnapshotReader, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, 
RevertsInit, SidecarsProvider, StageCheckpointReader, StateChangeWriter, - StateProviderBox, StateWriter, StatsReader, StorageReader, StorageTrieWriter, + StateProviderBox, StateReader, StateWriter, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; +use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; -use reth_chainspec::{ChainInfo, ChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainInfo, ChainSpec, ChainSpecProvider, EthereumHardforks}; use reth_db::{ cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, }; use reth_db_api::{ common::KeyValue, - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, RangeWalker}, + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }, - table::{Table, TableRow}, + table::Table, transaction::{DbTx, DbTxMut}, DatabaseError, }; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ - keccak256, parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, - BlockHashOrNumber, BlockNumber, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, - Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + parlia::Snapshot, Account, BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Bytecode, + GotExpected, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, Withdrawal, Withdrawals, }; -use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneModes, PruneSegment}; +use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::TryIntoHistoricalStateProvider; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -92,6 +93,12 @@ impl DerefMut for DatabaseProviderRW { } } +impl AsRef::TXMut>> for DatabaseProviderRW { + fn as_ref(&self) -> &DatabaseProvider<::TXMut> { + &self.0 + } +} + impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. pub fn commit(self) -> ProviderResult { @@ -104,6 +111,12 @@ impl DatabaseProviderRW { } } +impl From> for DatabaseProvider<::TXMut> { + fn from(provider: DatabaseProviderRW) -> Self { + provider.0 + } +} + /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. 
Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] @@ -142,9 +155,14 @@ impl DatabaseProvider { } } -impl DatabaseProvider { - /// Storage provider for state at that given block - pub fn state_provider_by_block_number( +impl AsRef for DatabaseProvider { + fn as_ref(&self) -> &Self { + self + } +} + +impl TryIntoHistoricalStateProvider for DatabaseProvider { + fn try_into_history_at_block( self, mut block_number: BlockNumber, ) -> ProviderResult { @@ -701,6 +719,7 @@ impl DatabaseProvider { let block_sidecars = self.get::(range.clone())?; let block_tx = self.get_block_transaction_range(range)?; + let mut blocks = Vec::with_capacity(block_headers.len()); // merge all into block let block_header_iter = block_headers.into_iter(); @@ -717,9 +736,8 @@ impl DatabaseProvider { let mut block_requests = block_requests_iter.next(); let mut block_sidecars = block_sidecars_iter.next(); - let mut blocks = Vec::new(); for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter.into_iter(), block_header_hashes_iter, block_tx_iter) + izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) { let header = header.seal(header_hash); @@ -805,12 +823,14 @@ impl DatabaseProvider { /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult { + ) -> ProviderResult> { if range.is_empty() { - return Ok(ExecutionOutcome::default()) + return Ok(None) } let start_block_number = *range.start(); @@ -818,10 +838,14 @@ impl DatabaseProvider { let block_bodies = self.get::(range.clone())?; // get transaction receipts - let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); + let Some(from_transaction_num) = block_bodies.first().map(|bodies| bodies.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|bodies| bodies.1.last_tx_num()) + else { + return Ok(None) + }; let storage_range = BlockNumberAddress::range(range.clone()); @@ -846,7 +870,7 @@ impl DatabaseProvider { let mut receipt_iter = self.get::(from_transaction_num..=to_transaction_num)?.into_iter(); - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); @@ -858,14 +882,14 @@ impl DatabaseProvider { receipts.push(block_receipts); } - Ok(ExecutionOutcome::new_init( + Ok(Some(ExecutionOutcome::new_init( state, reverts, Vec::new(), receipts.into(), start_block_number, Vec::new(), - )) + ))) } /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the @@ -1144,7 +1168,7 @@ impl DatabaseProvider { let mut receipt_iter = self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. 
for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); @@ -1412,6 +1436,8 @@ impl DatabaseProvider { let block_tx = self.take_block_transaction_range(range.clone())?; let block_sidecars = self.take::(range.clone())?; + let mut blocks = Vec::with_capacity(block_headers.len()); + // rm HeaderTerminalDifficulties self.remove::(range)?; @@ -1430,9 +1456,8 @@ impl DatabaseProvider { let mut block_requests = block_requests_iter.next(); let mut block_sidecars = block_sidecars_iter.next(); - let mut blocks = Vec::new(); for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter.into_iter(), block_header_hashes_iter, block_tx_iter) + izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) { let header = header.seal(header_hash); @@ -1539,7 +1564,9 @@ impl DatabaseProvider { Ok(deleted) } - /// Unwind a table forward by a [`Walker`][reth_db_api::cursor::Walker] on another table + /// Unwind a table forward by a [`Walker`][reth_db_api::cursor::Walker] on another table. + /// + /// Note: Range is inclusive and first key in the range is removed. pub fn unwind_table_by_walker( &self, range: impl RangeBounds, @@ -1556,119 +1583,6 @@ impl DatabaseProvider { Ok(()) } - /// Prune the table for the specified pre-sorted key iterator. - /// - /// Returns number of rows pruned. - pub fn prune_table_with_iterator( - &self, - keys: impl IntoIterator, - limiter: &mut PruneLimiter, - mut delete_callback: impl FnMut(TableRow), - ) -> Result<(usize, bool), DatabaseError> { - let mut cursor = self.tx.cursor_write::()?; - let mut keys = keys.into_iter(); - - let mut deleted_entries = 0; - - for key in &mut keys { - if limiter.is_limit_reached() { - debug!( - target: "providers::db", - ?limiter, - deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(), - time_limit = %limiter.is_time_limit_reached(), - table = %T::NAME, - "Pruning limit reached" - ); - break - } - - let row = cursor.seek_exact(key)?; - if let Some(row) = row { - cursor.delete_current()?; - limiter.increment_deleted_entries_count(); - deleted_entries += 1; - delete_callback(row); - } - } - - let done = keys.next().is_none(); - Ok((deleted_entries, done)) - } - - /// Prune the table for the specified key range. - /// - /// Returns number of rows pruned. - pub fn prune_table_with_range( - &self, - keys: impl RangeBounds + Clone + Debug, - limiter: &mut PruneLimiter, - mut skip_filter: impl FnMut(&TableRow) -> bool, - mut delete_callback: impl FnMut(TableRow), - ) -> Result<(usize, bool), DatabaseError> { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk_range(keys)?; - - let mut deleted_entries = 0; - - let done = loop { - // check for time out must be done in this scope since it's not done in - // `prune_table_with_range_step` - if limiter.is_limit_reached() { - debug!( - target: "providers::db", - ?limiter, - deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(), - time_limit = %limiter.is_time_limit_reached(), - table = %T::NAME, - "Pruning limit reached" - ); - break false - } - - let done = self.prune_table_with_range_step( - &mut walker, - limiter, - &mut skip_filter, - &mut delete_callback, - )?; - - if done { - break true - } - deleted_entries += 1; - }; - - Ok((deleted_entries, done)) - } - - /// Steps once with the given walker and prunes the entry in the table. - /// - /// Returns `true` if the walker is finished, `false` if it may have more data to prune. 
- /// - /// CAUTION: Pruner limits are not checked. This allows for a clean exit of a prune run that's - /// pruning different tables concurrently, by letting them step to the same height before - /// timing out. - pub fn prune_table_with_range_step( - &self, - walker: &mut RangeWalker<'_, T, ::CursorMut>, - limiter: &mut PruneLimiter, - skip_filter: &mut impl FnMut(&TableRow) -> bool, - delete_callback: &mut impl FnMut(TableRow), - ) -> Result { - let Some(res) = walker.next() else { return Ok(true) }; - - let row = res?; - - if !skip_filter(&row) { - walker.delete_current()?; - limiter.increment_deleted_entries_count(); - delete_callback(row); - } - - Ok(false) - } - /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. fn take_shard(&self, key: T::Key) -> ProviderResult> @@ -1695,7 +1609,7 @@ impl DatabaseProvider { /// This function is used by history indexing stages. fn append_history_index( &self, - index_updates: BTreeMap>, + index_updates: impl IntoIterator)>, mut sharded_key_factory: impl FnMut(P, BlockNumber) -> T::Key, ) -> ProviderResult<()> where @@ -1703,21 +1617,17 @@ impl DatabaseProvider { T: Table, { for (partial_key, indices) in index_updates { - let last_shard = self.take_shard::(sharded_key_factory(partial_key, u64::MAX))?; - // chunk indices and insert them in shards of N size. - let indices = last_shard.iter().chain(indices.iter()); - let chunks = indices - .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD) - .into_iter() - .map(|chunks| chunks.copied().collect()) - .collect::>>(); - - let mut chunks = chunks.into_iter().peekable(); + let mut last_shard = + self.take_shard::(sharded_key_factory(partial_key, u64::MAX))?; + last_shard.extend(indices); + // Chunk indices and insert them in shards of N size. + let indices = last_shard; + let mut chunks = indices.chunks(sharded_key::NUM_OF_INDICES_IN_SHARD).peekable(); while let Some(list) = chunks.next() { let highest_block_number = if chunks.peek().is_some() { *list.last().expect("`chunks` does not return empty list") } else { - // Insert last list with u64::MAX + // Insert last list with `u64::MAX`. u64::MAX }; self.tx.put::( @@ -1730,6 +1640,14 @@ impl DatabaseProvider { } } +impl ChainSpecProvider for DatabaseProvider { + type ChainSpec = ChainSpec; + + fn chain_spec(&self) -> Arc { + self.chain_spec.clone() + } +} + impl AccountReader for DatabaseProvider { fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) @@ -2539,7 +2457,7 @@ impl EvmEnvProvider for DatabaseProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -2554,18 +2472,12 @@ impl EvmEnvProvider for DatabaseProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_and_block_env( - cfg, - block_env, - &self.chain_spec, - header, - total_difficulty, - ); + evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -2576,7 +2488,7 @@ impl EvmEnvProvider for DatabaseProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -2590,12 +2502,12 @@ impl EvmEnvProvider for DatabaseProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, &self.chain_spec, header, total_difficulty); + evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } } @@ -3224,7 +3136,7 @@ impl HistoryWriter for DatabaseProvider { fn insert_account_history_index( &self, - account_transitions: BTreeMap>, + account_transitions: impl IntoIterator)>, ) -> ProviderResult<()> { self.append_history_index::<_, tables::AccountsHistory>( account_transitions, @@ -3274,7 +3186,7 @@ impl HistoryWriter for DatabaseProvider { fn insert_storage_history_index( &self, - storage_transitions: BTreeMap<(Address, B256), Vec>, + storage_transitions: impl IntoIterator)>, ) -> ProviderResult<()> { self.append_history_index::<_, tables::StoragesHistory>( storage_transitions, @@ -3310,13 +3222,19 @@ impl BlockExecutionReader for DatabaseProvider { let blocks = self.get_block_range(range.clone())?; // get execution res - let execution_state = self.get_state(range)?; + let execution_state = self.get_state(range)?.unwrap_or_default(); Ok(Chain::new(blocks, execution_state, None)) } } -impl BlockExecutionWriter for DatabaseProvider { +impl StateReader for DatabaseProvider { + fn get_state(&self, block: BlockNumber) -> ProviderResult> { + self.get_state(block..=block) + } +} + +impl BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_range( &self, range: RangeInclusive, @@ -3494,7 +3412,7 @@ impl BlockExecutionWriter for DatabaseProvider { } } -impl BlockWriter for DatabaseProvider { +impl BlockWriter for DatabaseProvider { /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`Headers`](tables::Headers) @@ -3783,6 +3701,26 @@ impl ParliaSnapshotReader for DatabaseProvider { } } +impl DBProvider for DatabaseProvider { + type Tx = TX; + + fn tx_ref(&self) -> &Self::Tx { + &self.tx + } + + fn tx_mut(&mut self) -> &mut Self::Tx { + &mut self.tx + } + + fn into_tx(self) -> Self::Tx { + self.tx + } + + fn prune_modes_ref(&self) -> &PruneModes { + self.prune_modes_ref() + } +} + /// Helper method to recover senders for any blocks in the db which do not have senders. This /// compares the length of the input senders [`Vec`], with the length of given transactions [`Vec`], /// and will add to the input senders vec if there are more transactions. 
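
Before the diff moves on to `providers/mod.rs`, one detail worth illustrating: the reworked `append_history_index` above now extends the sentinel-keyed last shard with the new indices and re-chunks the combined list, rather than chaining and re-collecting iterators. A runnable toy version of that chunking step, under simplified assumptions (the `reshard` helper name is hypothetical, `NUM_OF_INDICES_IN_SHARD` is shrunk for the demo, and a plain `Vec<u64>` stands in for reth's shard list type):

```rust
/// Toy shard size; reth's `sharded_key::NUM_OF_INDICES_IN_SHARD` is much larger.
const NUM_OF_INDICES_IN_SHARD: usize = 4;

/// Splits the merged index list into shards keyed by their highest block
/// number; the final, still-growing shard is keyed with `u64::MAX` so a
/// `seek_exact` on the sentinel always finds it.
fn reshard(
    mut last_shard: Vec<u64>,
    new_indices: impl IntoIterator<Item = u64>,
) -> Vec<(u64, Vec<u64>)> {
    last_shard.extend(new_indices);
    let mut shards = Vec::new();
    let mut chunks = last_shard.chunks(NUM_OF_INDICES_IN_SHARD).peekable();
    while let Some(list) = chunks.next() {
        let highest_block_number = if chunks.peek().is_some() {
            *list.last().expect("`chunks` does not return empty list")
        } else {
            u64::MAX // the tail shard keeps the sentinel key
        };
        shards.push((highest_block_number, list.to_vec()));
    }
    shards
}

fn main() {
    // A full previous shard (ending at block 40) absorbs two new indices:
    // the full chunk keeps its highest block as key, the tail gets u64::MAX.
    let shards = reshard(vec![10, 20, 30, 40], [50, 60]);
    assert_eq!(shards, vec![(40, vec![10, 20, 30, 40]), (u64::MAX, vec![50, 60])]);
}
```

Keying full shards by their highest block number while the live shard stays at `u64::MAX` means a lookup for "the shard covering block `b`" can simply seek the first shard key `>= b`.
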
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 18ebdea4ca..d1fa723724 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -8,23 +8,23 @@ use crate::{ StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; -use reth_chainspec::{ChainInfo, ChainSpec, EthChainSpec}; -use reth_db_api::{ - database::Database, - models::{AccountBeforeTx, StoredBlockBodyIndices}, -}; +use reth_chainspec::{ChainInfo, ChainSpec}; +use reth_db::Database; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; +use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, - BlockNumHash, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + parlia::Snapshot, Account, BlobSidecars, Block, BlockHashOrNumber, BlockId, BlockNumHash, + BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, + Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -64,22 +64,27 @@ use reth_storage_api::SidecarsProvider; mod blockchain_provider; pub use blockchain_provider::BlockchainProvider2; +/// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. +pub trait ProviderNodeTypes: NodeTypesWithDB {} + +impl ProviderNodeTypes for T where T: NodeTypesWithDB {} + /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. #[allow(missing_debug_implementations)] -pub struct BlockchainProvider { +pub struct BlockchainProvider { /// Provider type used to access the database. - database: ProviderFactory, + database: ProviderFactory, /// The blockchain tree instance. tree: Arc, /// Tracks the chain info wrt forkchoice updates chain_info: ChainInfoTracker, } -impl Clone for BlockchainProvider { +impl Clone for BlockchainProvider { fn clone(&self) -> Self { Self { database: self.database.clone(), @@ -89,7 +94,7 @@ impl Clone for BlockchainProvider { } } -impl BlockchainProvider { +impl BlockchainProvider { /// Sets the treeviewer for the provider. 
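The `ProviderNodeTypes` trait introduced above is the usual blanket-impl trait-alias pattern: the bound list is written once, and every conforming type picks up the alias for free. A self-contained sketch with a dummy `NodeTypesWithDB` standing in for reth's real trait:

// Stand-in trait; reth's actual NodeTypesWithDB carries ChainSpec/Engine/DB bounds.
trait NodeTypesWithDB: Send + Sync + 'static {}

/// Helper trait keeping common requirements of providers in one place.
trait ProviderNodeTypes: NodeTypesWithDB {}
impl<T> ProviderNodeTypes for T where T: NodeTypesWithDB {}

// Generic code can now bound on the short alias instead of the full list.
struct BlockchainProvider<N: ProviderNodeTypes>(std::marker::PhantomData<N>);

struct MyNode;
impl NodeTypesWithDB for MyNode {}

fn main() {
    // Any NodeTypesWithDB implementor satisfies ProviderNodeTypes automatically.
    let _provider = BlockchainProvider::<MyNode>(std::marker::PhantomData);
}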
#[doc(hidden)] pub fn with_tree(mut self, tree: Arc) -> Self { @@ -98,15 +103,12 @@ impl BlockchainProvider { } } -impl BlockchainProvider -where - DB: Database, -{ +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker, alongside the finalized header /// if it exists. pub fn with_blocks( - database: ProviderFactory, + database: ProviderFactory, tree: Arc, latest: SealedHeader, finalized: Option, @@ -116,7 +118,7 @@ where /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { + pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; let latest_header = provider @@ -151,10 +153,10 @@ where } } -impl BlockchainProvider +impl BlockchainProvider where Self: StateProviderFactory, - DB: Database, + N: NodeTypesWithDB, { /// Return a [`StateProviderBox`] that contains bundle state data provider. /// Used to inspect or execute transaction on the pending state. @@ -171,25 +173,27 @@ where } } -impl DatabaseProviderFactory for BlockchainProvider -where - DB: Database, -{ - fn database_provider_ro(&self) -> ProviderResult> { +impl DatabaseProviderFactory for BlockchainProvider { + type DB = N::DB; + type Provider = DatabaseProvider<::TX>; + type ProviderRW = DatabaseProvider<::TXMut>; + + fn database_provider_ro(&self) -> ProviderResult { self.database.provider() } + + fn database_provider_rw(&self) -> ProviderResult { + self.database.provider_rw().map(|p| p.0) + } } -impl StaticFileProviderFactory for BlockchainProvider { +impl StaticFileProviderFactory for BlockchainProvider { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } -impl HeaderProvider for BlockchainProvider -where - DB: Database, -{ +impl HeaderProvider for BlockchainProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) } @@ -230,10 +234,7 @@ where } } -impl BlockHashReader for BlockchainProvider -where - DB: Database, -{ +impl BlockHashReader for BlockchainProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.database.block_hash(number) } @@ -247,10 +248,7 @@ where } } -impl BlockNumReader for BlockchainProvider -where - DB: Database, -{ +impl BlockNumReader for BlockchainProvider { fn chain_info(&self) -> ProviderResult { Ok(self.chain_info.chain_info()) } @@ -268,10 +266,7 @@ where } } -impl BlockIdReader for BlockchainProvider -where - DB: Database, -{ +impl BlockIdReader for BlockchainProvider { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.tree.pending_block_num_hash()) } @@ -285,10 +280,7 @@ where } } -impl BlockReader for BlockchainProvider -where - DB: Database, -{ +impl BlockReader for BlockchainProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { let block = match source { BlockSource::Any => { @@ -379,10 +371,7 @@ where } } -impl TransactionsProvider for BlockchainProvider -where - DB: Database, -{ +impl TransactionsProvider for BlockchainProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) } @@ -446,10 +435,7 @@ where } } -impl ReceiptProvider for BlockchainProvider -where - DB: Database, -{ +impl ReceiptProvider for 
BlockchainProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) } @@ -470,10 +456,7 @@ where } } -impl ReceiptProviderIdExt for BlockchainProvider -where - DB: Database, -{ +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { @@ -497,10 +480,7 @@ where } } -impl WithdrawalsProvider for BlockchainProvider -where - DB: Database, -{ +impl WithdrawalsProvider for BlockchainProvider { fn withdrawals_by_block( &self, id: BlockHashOrNumber, @@ -514,10 +494,7 @@ where } } -impl SidecarsProvider for BlockchainProvider -where - DB: Database, -{ +impl SidecarsProvider for BlockchainProvider { fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.sidecars(block_hash) } @@ -527,10 +504,7 @@ where } } -impl RequestsProvider for BlockchainProvider -where - DB: Database, -{ +impl RequestsProvider for BlockchainProvider { fn requests_by_block( &self, id: BlockHashOrNumber, @@ -540,10 +514,7 @@ where } } -impl StageCheckpointReader for BlockchainProvider -where - DB: Database, -{ +impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) } @@ -557,10 +528,7 @@ where } } -impl EvmEnvProvider for BlockchainProvider -where - DB: Database, -{ +impl EvmEnvProvider for BlockchainProvider { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -569,7 +537,7 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { self.database.provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -582,7 +550,7 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -594,7 +562,7 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { self.database.provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -606,16 +574,13 @@ where evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { self.database.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } -impl PruneCheckpointReader for BlockchainProvider -where - DB: Database, -{ +impl PruneCheckpointReader for BlockchainProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -628,22 +593,15 @@ where } } -impl ChainSpecProvider for BlockchainProvider -where - DB: Send + Sync, - ChainSpec: EthChainSpec, -{ - type ChainSpec = ChainSpec; +impl ChainSpecProvider for BlockchainProvider { + type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { self.database.chain_spec() } } -impl StateProviderFactory for BlockchainProvider -where - DB: Database, -{ +impl StateProviderFactory for BlockchainProvider { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -736,10 +694,7 @@ where } } -impl BlockchainTreeEngine for BlockchainProvider -where - DB: Send + Sync, -{ +impl BlockchainTreeEngine for BlockchainProvider { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { self.tree.buffer_block(block) } @@ -778,10 +733,7 @@ where } } -impl BlockchainTreeViewer for BlockchainProvider -where - DB: Send + Sync, -{ +impl BlockchainTreeViewer for BlockchainProvider { fn header_by_hash(&self, hash: BlockHash) -> Option { self.tree.header_by_hash(hash) } @@ -823,9 +775,8 @@ where } } -impl CanonChainTracker for BlockchainProvider +impl CanonChainTracker for BlockchainProvider where - DB: Send + Sync, Self: BlockReader, { fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -858,7 +809,7 @@ where } } -impl BlockReaderIdExt for BlockchainProvider +impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, { @@ -874,7 +825,7 @@ where // check the database, canonical blocks are only stored in the database self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) } else { - self.block_by_hash(hash.block_hash) + BlockReader::block_by_hash(self, hash.block_hash) } } } @@ -937,10 +888,7 @@ where } } -impl BlockchainTreePendingStateProvider for BlockchainProvider -where - DB: Send + Sync, -{ +impl BlockchainTreePendingStateProvider for BlockchainProvider { fn find_pending_state_provider( &self, block_hash: BlockHash, @@ -949,19 +897,13 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider -where - DB: Send + Sync, -{ +impl CanonStateSubscriptions for BlockchainProvider { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider -where - DB: Send + Sync, -{ +impl ForkChoiceSubscriptions for BlockchainProvider { fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.chain_info.subscribe_safe_block(); ForkChoiceNotifications(receiver) @@ -973,10 +915,7 @@ where } } -impl ChangeSetReader for BlockchainProvider -where - DB: Database, -{ +impl ChangeSetReader for BlockchainProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -985,20 +924,14 @@ where } } -impl AccountReader for BlockchainProvider -where - DB: Database + Sync + Send, -{ +impl AccountReader for BlockchainProvider { /// Get basic account information. 
fn basic_account(&self, address: Address) -> ProviderResult> { self.database.provider()?.basic_account(address) } } -impl ParliaSnapshotReader for BlockchainProvider -where - DB: Database + Sync + Send, -{ +impl ParliaSnapshotReader for BlockchainProvider { fn get_parlia_snapshot(&self, block_hash: B256) -> ProviderResult> { self.database.provider()?.get_parlia_snapshot(block_hash) } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 19f758bc62..a8613a8d1a 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -2,6 +2,7 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, }; +use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -9,21 +10,21 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{ - constants::EPOCH_SLOTS, Account, Address, BlockNumber, Bytecode, Bytes, StaticFileSegment, - StorageKey, StorageValue, B256, -}; +use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::TriePrefixSetsMut, proof::Proof, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, StateRoot, StorageRoot, + proof::Proof, updates::TrieUpdates, witness::TrieWitness, AccountProof, HashedPostState, + HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieWitness, }; -use std::{collections::HashMap, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, +}; /// State provider for a given block number which takes a tx reference. 
/// @@ -292,17 +293,9 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { .map_err(|err| ProviderError::Database(err.into())) } - fn state_root_from_nodes( - &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { - let mut revert_state = self.revert_state()?; - let mut revert_prefix_sets = revert_state.construct_prefix_sets(); - revert_state.extend(hashed_state); - revert_prefix_sets.extend(prefix_sets); - StateRoot::overlay_root_from_nodes(self.tx, nodes, revert_state, revert_prefix_sets) + fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { + input.prepend(self.revert_state()?); + StateRoot::overlay_root_from_nodes(self.tx, input) .map_err(|err| ProviderError::Database(err.into())) } @@ -318,21 +311,11 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { fn state_root_from_nodes_with_updates( &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - let mut revert_state = self.revert_state()?; - let mut revert_prefix_sets = revert_state.construct_prefix_sets(); - revert_state.extend(hashed_state); - revert_prefix_sets.extend(prefix_sets); - StateRoot::overlay_root_from_nodes_with_updates( - self.tx, - nodes, - revert_state, - revert_prefix_sets, - ) - .map_err(|err| ProviderError::Database(err.into())) + input.prepend(self.revert_state()?); + StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + .map_err(|err| ProviderError::Database(err.into())) } } @@ -353,25 +336,31 @@ impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { /// Get account and storage proofs. fn proof( &self, - hashed_state: HashedPostState, + mut input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - let mut revert_state = self.revert_state()?; - revert_state.extend(hashed_state); - Proof::overlay_account_proof(self.tx, revert_state, address, slots) + input.prepend(self.revert_state()?); + Proof::overlay_account_proof(self.tx, input, address, slots) .map_err(Into::::into) } + fn multiproof( + &self, + mut input: TrieInput, + targets: HashMap>, + ) -> ProviderResult { + input.prepend(self.revert_state()?); + Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + } + fn witness( &self, - overlay: HashedPostState, + mut input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - let mut revert_state = self.revert_state()?; - revert_state.extend(overlay); - TrieWitness::overlay_witness(self.tx, revert_state, target) - .map_err(Into::::into) + input.prepend(self.revert_state()?); + TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) } } @@ -505,12 +494,13 @@ mod tests { AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, StaticFileProviderFactory, }; + use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ models::{storage_sharded_key::StorageShardedKey, AccountBeforeTx, ShardedKey}, transaction::{DbTx, DbTxMut}, }; - use reth_primitives::{address, b256, Account, Address, StorageEntry, B256, U256}; + use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); diff --git a/crates/storage/provider/src/providers/state/latest.rs 
b/crates/storage/provider/src/providers/state/latest.rs index 72fd5baac0..74dfdac732 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,25 +1,22 @@ -use std::collections::HashMap; - use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, StateProvider, StateRootProvider, }; +use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, transaction::DbTx, }; -use reth_primitives::{ - Account, Address, BlockNumber, Bytecode, Bytes, StaticFileSegment, StorageKey, StorageValue, - B256, -}; +use reth_primitives::{Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ - prefix_set::TriePrefixSetsMut, proof::Proof, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, StateRoot, StorageRoot, + proof::Proof, updates::TrieUpdates, witness::TrieWitness, AccountProof, HashedPostState, + HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, }; use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieWitness}; +use std::collections::{HashMap, HashSet}; /// State provider over latest state that takes tx reference. #[derive(Debug)] @@ -86,13 +83,8 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { .map_err(|err| ProviderError::Database(err.into())) } - fn state_root_from_nodes( - &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { - StateRoot::overlay_root_from_nodes(self.tx, nodes, hashed_state, prefix_sets) + fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { + StateRoot::overlay_root_from_nodes(self.tx, input) .map_err(|err| ProviderError::Database(err.into())) } @@ -106,11 +98,9 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { fn state_root_from_nodes_with_updates( &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_from_nodes_with_updates(self.tx, nodes, hashed_state, prefix_sets) + StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) .map_err(|err| ProviderError::Database(err.into())) } } @@ -129,20 +119,28 @@ impl<'b, TX: DbTx> StorageRootProvider for LatestStateProviderRef<'b, TX> { impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { fn proof( &self, - hashed_state: HashedPostState, + input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx, hashed_state, address, slots) + Proof::overlay_account_proof(self.tx, input, address, slots) .map_err(Into::::into) } + fn multiproof( + &self, + input: TrieInput, + targets: HashMap>, + ) -> ProviderResult { + Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + } + fn witness( &self, - overlay: HashedPostState, + input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - TrieWitness::overlay_witness(self.tx, overlay, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) } } diff --git a/crates/storage/provider/src/providers/state/macros.rs 
b/crates/storage/provider/src/providers/state/macros.rs index e3499c96ff..49a168f4e7 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -31,28 +31,29 @@ macro_rules! delegate_provider_impls { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => AccountReader $(where [$($generics)*])? { - fn basic_account(&self, address: reth_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; + fn basic_account(&self, address: alloy_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; } BlockHashReader $(where [$($generics)*])? { - fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; - fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; + fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; + fn canonical_hashes_range(&self, start: alloy_primitives::BlockNumber, end: alloy_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; } StateProvider $(where [$($generics)*])? { - fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; - fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; + fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; + fn bytecode_by_hash(&self, code_hash: alloy_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult; - fn state_root_from_nodes(&self, nodes: reth_trie::updates::TrieUpdates, state: reth_trie::HashedPostState, prefix_sets: reth_trie::prefix_set::TriePrefixSetsMut) -> reth_storage_errors::provider::ProviderResult; - fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; - fn state_root_from_nodes_with_updates(&self, nodes: reth_trie::updates::TrieUpdates, state: reth_trie::HashedPostState, prefix_sets: reth_trie::prefix_set::TriePrefixSetsMut) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult; + fn state_root_from_nodes(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; } StorageRootProvider $(where [$($generics)*])? { - fn storage_root(&self, address: reth_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? 
{ - fn proof(&self, state: reth_trie::HashedPostState, address: reth_primitives::Address, slots: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; - fn witness(&self, state: reth_trie::HashedPostState, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; + fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; + fn multiproof(&self, input: reth_trie::TrieInput, targets: std::collections::HashMap>) -> reth_storage_errors::provider::ProviderResult; + fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } ); } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index a5c8c814df..bcc3c71b19 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,15 +6,15 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{ HeaderMask, ReceiptMask, SidecarMask, StaticFileCursor, TransactionMask, }; use reth_db_api::models::CompactU256; use reth_primitives::{ - Address, BlobSidecars, BlockHash, BlockHashOrNumber, BlockNumber, Header, Receipt, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, - B256, U256, + BlobSidecars, BlockHashOrNumber, Header, Receipt, SealedHeader, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, }; use reth_storage_api::SidecarsProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index d8656961f9..76b0d7c8b9 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -8,7 +8,9 @@ use crate::{ HeaderProvider, ReceiptProvider, RequestsProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; +use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; +use notify::{RecommendedWatcher, RecursiveMode, Watcher}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_db::{ @@ -24,14 +26,12 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_nippy_jar::NippyJar; +use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_primitives::{ - keccak256, static_file::{find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive}, - Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, - Withdrawals, B256, U256, + BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, Withdrawal, Withdrawals, }; use 
reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::SidecarsProvider; @@ -85,14 +85,105 @@ impl StaticFileProvider { } /// Creates a new [`StaticFileProvider`] with read-only access. - pub fn read_only(path: impl AsRef) -> ProviderResult { - Self::new(path, StaticFileAccess::RO) + /// + /// Set `watch_directory` to `true` to track the most recent changes in static files. Otherwise, + /// new data won't be detected or queryable. + pub fn read_only(path: impl AsRef, watch_directory: bool) -> ProviderResult { + let provider = Self::new(path, StaticFileAccess::RO)?; + + if watch_directory { + provider.watch_directory(); + } + + Ok(provider) } /// Creates a new [`StaticFileProvider`] with read-write access. pub fn read_write(path: impl AsRef) -> ProviderResult { Self::new(path, StaticFileAccess::RW) } + + /// Watches the directory for changes and updates the in-memory index when modifications + /// are detected. + /// + /// This may be necessary, since a non-node process that owns a [`StaticFileProvider`] does not + /// receive `update_index` notifications from a node that appends/truncates data. + pub fn watch_directory(&self) { + let provider = self.clone(); + std::thread::spawn(move || { + let (tx, rx) = std::sync::mpsc::channel(); + let mut watcher = RecommendedWatcher::new( + move |res| tx.send(res).unwrap(), + notify::Config::default(), + ) + .expect("failed to create watcher"); + + watcher + .watch(&provider.path, RecursiveMode::NonRecursive) + .expect("failed to watch path"); + + // Some backends send repeated modified events + let mut last_event_timestamp = None; + + while let Ok(res) = rx.recv() { + match res { + Ok(event) => { + // We only care about modified data events + if !matches!( + event.kind, + notify::EventKind::Modify(notify::event::ModifyKind::Data(_)) + ) { + continue + } + + // We only trigger a re-initialization if a configuration file was + // modified. This means that a + // static_file_provider.commit() was called on the node after + // appending/truncating rows + for segment in event.paths { + // Ensure it's a file with the .conf extension + if !segment + .extension() + .is_some_and(|s| s.to_str() == Some(CONFIG_FILE_EXTENSION)) + { + continue + } + + // Ensure it's well formatted static file name + if StaticFileSegment::parse_filename( + &segment.file_stem().expect("qed").to_string_lossy(), + ) + .is_none() + { + continue + } + + // If we can read the metadata and modified timestamp, ensure this is + // not an old or repeated event. + if let Ok(current_modified_timestamp) = + std::fs::metadata(&segment).and_then(|m| m.modified()) + { + if last_event_timestamp.is_some_and(|last_timestamp| { + last_timestamp >= current_modified_timestamp + }) { + continue + } + last_event_timestamp = Some(current_modified_timestamp); + } + + info!(target: "providers::static_file", updated_file = ?segment.file_stem(), "re-initializing static file provider index"); + if let Err(err) = provider.initialize_index() { + warn!(target: "providers::static_file", "failed to re-initialize index: {err}"); + } + break + } + } + + Err(err) => warn!(target: "providers::watcher", "watch error: {err:?}"), + } + } + }); + } } impl Deref for StaticFileProvider { @@ -115,9 +206,6 @@ pub struct StaticFileProviderInner { static_files_tx_index: RwLock, /// Directory where `static_files` are located path: PathBuf, - /// Whether [`StaticFileJarProvider`] loads filters into memory. If not, `by_hash` queries - /// won't be able to be queried directly. 
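For readers skimming the `watch_directory` hunk above, the essential moving parts are: a `notify` watcher filtered down to data-modification events on configuration files, debounced by file mtime so repeated events from the same write are ignored. A condensed, runnable sketch under the same `notify` 6.x API the diff imports; the watched directory and the ".conf" extension here are illustrative:

// Condensed sketch of the watcher loop; not reth's actual implementation.
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::{path::Path, sync::mpsc, time::SystemTime};

fn watch_config_files(dir: &Path) -> notify::Result<()> {
    let (tx, rx) = mpsc::channel();
    let mut watcher =
        RecommendedWatcher::new(move |res| tx.send(res).unwrap(), notify::Config::default())?;
    watcher.watch(dir, RecursiveMode::NonRecursive)?;

    // Some backends emit the same data-modified event repeatedly, so only react
    // when a file's mtime actually advances past the last one we handled.
    let mut last_seen: Option<SystemTime> = None;
    while let Ok(res) = rx.recv() {
        let Ok(event) = res else { continue };
        if !matches!(event.kind, notify::EventKind::Modify(notify::event::ModifyKind::Data(_))) {
            continue
        }
        for path in event.paths {
            if path.extension().and_then(|e| e.to_str()) != Some("conf") {
                continue
            }
            if let Ok(mtime) = std::fs::metadata(&path).and_then(|m| m.modified()) {
                if last_seen.is_some_and(|t| t >= mtime) {
                    continue
                }
                last_seen = Some(mtime);
            }
            println!("config changed: {}", path.display()); // re-initialize the index here
        }
    }
    Ok(())
}

fn main() -> notify::Result<()> {
    watch_config_files(Path::new("."))
}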
- load_filters: bool, /// Maintains a writer set of [`StaticFileSegment`]. writers: StaticFileWriters, metrics: Option>, @@ -142,7 +230,6 @@ impl StaticFileProviderInner { static_files_max_block: Default::default(), static_files_tx_index: Default::default(), path: path.as_ref().to_path_buf(), - load_filters: false, metrics: None, access, _lock_file, @@ -157,14 +244,6 @@ impl StaticFileProviderInner { } impl StaticFileProvider { - /// Loads filters into memory when creating a [`StaticFileJarProvider`]. - pub fn with_filters(self) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.load_filters = true; - Self(Arc::new(provider)) - } - /// Enables metrics on the [`StaticFileProvider`]. pub fn with_metrics(self) -> Self { let mut provider = @@ -281,6 +360,8 @@ impl StaticFileProvider { } /// Given a segment and block range it removes the cached provider from the map. + /// + /// CAUTION: cached provider should be dropped before calling this or IT WILL deadlock. pub fn remove_cached_provider( &self, segment: StaticFileSegment, @@ -301,14 +382,8 @@ impl StaticFileProvider { let jar = if let Some((_, jar)) = self.map.remove(&key) { jar.jar } else { - let mut jar = NippyJar::::load( - &self.path.join(segment.filename(&fixed_block_range)), - ) - .map_err(|e| ProviderError::NippyJar(e.to_string()))?; - if self.load_filters { - jar.load_filters().map_err(|e| ProviderError::NippyJar(e.to_string()))?; - } - jar + NippyJar::::load(&self.path.join(segment.filename(&fixed_block_range))) + .map_err(|e| ProviderError::NippyJar(e.to_string()))? }; jar.delete().map_err(|e| ProviderError::NippyJar(e.to_string()))?; @@ -340,12 +415,7 @@ impl StaticFileProvider { } else { trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Creating jar from scratch"); let path = self.path.join(segment.filename(fixed_block_range)); - let mut jar = - NippyJar::load(&path).map_err(|e| ProviderError::NippyJar(e.to_string()))?; - if self.load_filters { - jar.load_filters().map_err(|e| ProviderError::NippyJar(e.to_string()))?; - } - + let jar = NippyJar::load(&path).map_err(|e| ProviderError::NippyJar(e.to_string()))?; self.map.entry(key).insert(LoadedJar::new(jar)?).downgrade().into() }; @@ -480,6 +550,7 @@ impl StaticFileProvider { let mut max_block = self.static_files_max_block.write(); let mut tx_index = self.static_files_tx_index.write(); + max_block.clear(); tx_index.clear(); for (segment, ranges) in @@ -507,6 +578,9 @@ impl StaticFileProvider { } } + // If this is a re-initialization, we need to clear this as well + self.map.clear(); + Ok(()) } @@ -576,7 +650,12 @@ impl StaticFileProvider { // * pruning data was interrupted before a config commit, then we have deleted data that // we are expected to still have. We need to check the Database and unwind everything // accordingly. - self.ensure_file_consistency(segment)?; + if self.access.is_read_only() { + self.check_segment_consistency(segment)?; + } else { + // Fetching the writer will attempt to heal any file level inconsistency. + self.latest_writer(segment)?; + } // Only applies to block-based static files. (Headers) // @@ -664,6 +743,23 @@ impl StaticFileProvider { Ok(unwind_target.map(PipelineTarget::Unwind)) } + /// Checks consistency of the latest static file segment and throws an error if at fault. + /// Read-only. 
+ pub fn check_segment_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()> { + if let Some(latest_block) = self.get_highest_static_file_block(segment) { + let file_path = + self.directory().join(segment.filename(&find_fixed_range(latest_block))); + + let jar = NippyJar::::load(&file_path) + .map_err(|e| ProviderError::NippyJar(e.to_string()))?; + + NippyJarChecker::new(jar) + .check_consistency() + .map_err(|e| ProviderError::NippyJar(e.to_string()))?; + } + Ok(()) + } + /// Check invariants for each corresponding table and static file segment: /// /// * the corresponding database table should overlap or have continuity in their keys @@ -1085,9 +1181,6 @@ pub trait StaticFileWriter { /// Commits all changes of all [`StaticFileProviderRW`] of all [`StaticFileSegment`]. fn commit(&self) -> ProviderResult<()>; - - /// Checks consistency of the segment latest file and heals if possible. - fn ensure_file_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()>; } impl StaticFileWriter for StaticFileProvider { @@ -1116,28 +1209,6 @@ impl StaticFileWriter for StaticFileProvider { fn commit(&self) -> ProviderResult<()> { self.writers.commit() } - - fn ensure_file_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()> { - match self.access { - StaticFileAccess::RO => { - let latest_block = self.get_highest_static_file_block(segment).unwrap_or_default(); - - let mut writer = StaticFileProviderRW::new( - segment, - latest_block, - Arc::downgrade(&self.0), - self.metrics.clone(), - )?; - - writer.ensure_file_consistency(self.access.is_read_only())?; - } - StaticFileAccess::RW => { - self.latest_writer(segment)?.ensure_file_consistency(self.access.is_read_only())?; - } - } - - Ok(()) - } } impl HeaderProvider for StaticFileProvider { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index f3739986ca..c5f1d8c12b 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -58,12 +58,12 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; + use alloy_primitives::{B256, U256}; use rand::seq::SliceRandom; use reth_db::{CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, Sidecars}; use reth_db_api::transaction::DbTxMut; use reth_primitives::{ - static_file::find_fixed_range, BlobSidecar, BlobSidecars, BlobTransactionSidecar, B256, - U256, + static_file::find_fixed_range, BlobSidecar, BlobSidecars, BlobTransactionSidecar, }; use reth_storage_api::SidecarsProvider; use reth_testing_utils::generators::{self, random_header_range}; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index dd7d5274f5..85179d2ff5 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,14 +2,14 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; +use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; use reth_db_api::models::CompactU256; -use reth_nippy_jar::{ConsistencyFailStrategy, NippyJar, NippyJarError, NippyJarWriter}; +use 
reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{find_fixed_range, SegmentHeader, SegmentRangeInclusive}, - BlobSidecars, BlockHash, BlockNumber, Header, Receipt, StaticFileSegment, - TransactionSignedNoHash, TxNumber, U256, + BlobSidecars, Header, Receipt, StaticFileSegment, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ @@ -120,6 +120,9 @@ pub struct StaticFileProviderRW { impl StaticFileProviderRW { /// Creates a new [`StaticFileProviderRW`] for a [`StaticFileSegment`]. + /// + /// Before use, transaction based segments should ensure the block end range is the expected + /// one, and heal if not. For more check `Self::ensure_end_range_consistency`. pub fn new( segment: StaticFileSegment, block: BlockNumber, @@ -127,14 +130,18 @@ impl StaticFileProviderRW { metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; - Ok(Self { + let mut writer = Self { writer, data_path, buf: Vec::with_capacity(100), reader, metrics, prune_on_commit: None, - }) + }; + + writer.ensure_end_range_consistency()?; + + Ok(writer) } fn open( @@ -165,14 +172,7 @@ impl StaticFileProviderRW { Err(err) => return Err(err), }; - let reader = Self::upgrade_provider_to_strong_reference(&reader); - let access = if reader.is_read_only() { - ConsistencyFailStrategy::ThrowError - } else { - ConsistencyFailStrategy::Heal - }; - - let result = match NippyJarWriter::new(jar, access) { + let result = match NippyJarWriter::new(jar) { Ok(writer) => Ok((writer, path)), Err(NippyJarError::FrozenJar) => { // This static file has been frozen, so we should @@ -192,33 +192,15 @@ impl StaticFileProviderRW { Ok(result) } - /// Checks the consistency of the file and heals it if necessary and `read_only` is set to - /// false. If the check fails, it will return an error. + /// If a file level healing happens, we need to update the end range on the + /// [`SegmentHeader`]. /// - /// If healing does happen, it will update the end range on the [`SegmentHeader`]. However, for - /// transaction based segments, the block end range has to be found and healed externally. + /// However, for transaction based segments, the block end range has to be found and healed + /// externally. /// - /// Check [`NippyJarWriter::ensure_file_consistency`] for more on healing. - pub fn ensure_file_consistency(&mut self, read_only: bool) -> ProviderResult<()> { - let inconsistent_error = || { - ProviderError::NippyJar( - "Inconsistent state found. Restart the node to heal.".to_string(), - ) - }; - - let check_mode = if read_only { - ConsistencyFailStrategy::ThrowError - } else { - ConsistencyFailStrategy::Heal - }; - - self.writer.ensure_file_consistency(check_mode).map_err(|error| { - if matches!(error, NippyJarError::InconsistentState) { - return inconsistent_error() - } - ProviderError::NippyJar(error.to_string()) - })?; - + /// Check [`reth_nippy_jar::NippyJarChecker`] & + /// [`NippyJarWriter`] for more on healing. + fn ensure_end_range_consistency(&mut self) -> ProviderResult<()> { // If we have lost rows (in this run or previous), we need to update the [SegmentHeader]. 
let expected_rows = if self.user_header().segment().is_headers() || self.user_header().segment().is_sidecars() @@ -229,9 +211,6 @@ impl StaticFileProviderRW { }; let pruned_rows = expected_rows - self.writer.rows() as u64; if pruned_rows > 0 { - if read_only { - return Err(inconsistent_error()) - } self.user_header_mut().prune(pruned_rows); } @@ -352,7 +331,7 @@ impl StaticFileProviderRW { ) -> ProviderResult { let segment = self.writer.user_header().segment(); - self.check_next_block_number(expected_block_number, segment)?; + self.check_next_block_number(expected_block_number)?; let start = Instant::now(); if let Some(last_block) = self.writer.user_header().block_end() { @@ -386,11 +365,7 @@ impl StaticFileProviderRW { /// Verifies if the incoming block number matches the next expected block number /// for a static file. This ensures data continuity when adding new blocks. - fn check_next_block_number( - &self, - expected_block_number: u64, - segment: StaticFileSegment, - ) -> ProviderResult<()> { + fn check_next_block_number(&self, expected_block_number: u64) -> ProviderResult<()> { // The next static file block number can be found by checking the one after block_end. // However if it's a new file that hasn't been added any data, its block range will actually // be None. In that case, the next block will be found on `expected_block_start`. @@ -403,7 +378,7 @@ impl StaticFileProviderRW { if expected_block_number != next_static_file_block { return Err(ProviderError::UnexpectedStaticFileBlockNumber( - segment, + self.writer.user_header().segment(), expected_block_number, next_static_file_block, )) @@ -418,15 +393,10 @@ impl StaticFileProviderRW { /// /// # Note /// Commits to the configuration file at the end. - fn truncate( - &mut self, - segment: StaticFileSegment, - num_rows: u64, - last_block: Option, - ) -> ProviderResult<()> { + fn truncate(&mut self, num_rows: u64, last_block: Option) -> ProviderResult<()> { let mut remaining_rows = num_rows; while remaining_rows > 0 { - let len = match segment { + let len = match self.writer.user_header().segment() { StaticFileSegment::Headers | StaticFileSegment::Sidecars => { self.writer.user_header().block_len().unwrap_or_default() } @@ -521,12 +491,9 @@ impl StaticFileProviderRW { /// Returns the current [`TxNumber`] as seen in the static file. 
fn append_with_tx_number( &mut self, - segment: StaticFileSegment, tx_num: TxNumber, value: V, ) -> ProviderResult { - debug_assert!(self.writer.user_header().segment() == segment); - if self.writer.user_header().tx_range().is_none() { self.writer.user_header_mut().set_tx_range(tx_num, tx_num); } else { @@ -619,7 +586,8 @@ impl StaticFileProviderRW { let start = Instant::now(); self.ensure_no_queued_prune()?; - let result = self.append_with_tx_number(StaticFileSegment::Transactions, tx_num, tx)?; + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Transactions); + let result = self.append_with_tx_number(tx_num, tx)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -646,7 +614,8 @@ impl StaticFileProviderRW { let start = Instant::now(); self.ensure_no_queued_prune()?; - let result = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); + let result = self.append_with_tx_number(tx_num, receipt)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -667,6 +636,8 @@ impl StaticFileProviderRW { I: Iterator>, R: Borrow, { + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); + let mut receipts_iter = receipts.into_iter().peekable(); // If receipts are empty, we can simply return None if receipts_iter.peek().is_none() { @@ -682,8 +653,7 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = - self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt.borrow())?; + tx_number = self.append_with_tx_number(tx_num, receipt.borrow())?; count += 1; } @@ -767,10 +737,9 @@ impl StaticFileProviderRW { ) -> ProviderResult<()> { let start = Instant::now(); - let segment = StaticFileSegment::Transactions; - debug_assert!(self.writer.user_header().segment() == segment); + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Transactions); - self.truncate(segment, to_delete, Some(last_block))?; + self.truncate(to_delete, Some(last_block))?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -791,10 +760,9 @@ impl StaticFileProviderRW { ) -> ProviderResult<()> { let start = Instant::now(); - let segment = StaticFileSegment::Receipts; - debug_assert!(self.writer.user_header().segment() == segment); + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); - self.truncate(segment, to_delete, Some(last_block))?; + self.truncate(to_delete, Some(last_block))?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -811,10 +779,9 @@ impl StaticFileProviderRW { fn prune_header_data(&mut self, to_delete: u64) -> ProviderResult<()> { let start = Instant::now(); - let segment = StaticFileSegment::Headers; - debug_assert!(self.writer.user_header().segment() == segment); + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - self.truncate(segment, to_delete, None)?; + self.truncate(to_delete, None)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -834,7 +801,7 @@ impl StaticFileProviderRW { let segment = StaticFileSegment::Sidecars; debug_assert!(self.writer.user_header().segment() == segment); - self.truncate(segment, to_delete, None)?; + self.truncate(to_delete, None)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( diff --git 
a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 0a6cbea36c..997ef5a44c 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,13 +1,15 @@ //! Dummy blocks and data for tests use crate::{DatabaseProviderRW, ExecutionOutcome}; -use alloy_primitives::Log; +use alloy_primitives::{ + b256, hex_literal::hex, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, +}; use once_cell::sync::Lazy; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_primitives::{ - alloy_primitives, b256, hex_literal::hex, Account, Address, BlobSidecars, BlockNumber, Bytes, - Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, SealedHeader, Signature, - Transaction, TransactionSigned, TxKind, TxLegacy, TxType, Withdrawal, Withdrawals, B256, U256, + Account, BlobSidecars, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, + SealedHeader, Signature, Transaction, TransactionSigned, TxLegacy, TxType, Withdrawal, + Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{ diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 22ba0604ae..2734cb815b 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,33 +1,38 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, - RequestsProvider, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, + BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, + EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, RequestsProvider, StateProvider, + StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_primitives::{ + keccak256, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, + B256, U256, }; use parking_lot::Mutex; use reth_chainspec::{ChainInfo, ChainSpec}; +use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - keccak256, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, - BlockNumber, BlockNumberOrTag, BlockWithSenders, Bytecode, Bytes, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, - U256, + Account, BlobSidecars, Block, BlockHashOrNumber, BlockId, BlockNumberOrTag, BlockWithSenders, + Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - SidecarsProvider, StageCheckpointReader, StateProofProvider, StorageRootProvider, + DatabaseProviderFactory, SidecarsProvider, 
StageCheckpointReader, StateProofProvider, + StorageRootProvider, }; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, ops::{RangeBounds, RangeInclusive}, sync::Arc, }; @@ -141,6 +146,20 @@ impl MockEthProvider { } } +impl DatabaseProviderFactory for MockEthProvider { + type DB = DatabaseMock; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; + + fn database_provider_ro(&self) -> ProviderResult { + Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) + } + + fn database_provider_rw(&self) -> ProviderResult { + Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) + } +} + impl HeaderProvider for MockEthProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); @@ -292,7 +311,7 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block_range( &self, - range: impl RangeBounds, + range: impl RangeBounds, ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); @@ -423,7 +442,7 @@ impl BlockNumReader for MockEthProvider { self.best_block_number() } - fn block_number(&self, hash: B256) -> ProviderResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { let lock = self.blocks.lock(); let num = lock.iter().find_map(|(h, b)| (*h == hash).then_some(b.number)); Ok(num) @@ -574,12 +593,7 @@ impl StateRootProvider for MockEthProvider { Ok(self.state_roots.lock().pop().unwrap_or_default()) } - fn state_root_from_nodes( - &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { Ok(self.state_roots.lock().pop().unwrap_or_default()) } @@ -593,9 +607,7 @@ impl StateRootProvider for MockEthProvider { fn state_root_from_nodes_with_updates( &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, + _input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let state_root = self.state_roots.lock().pop().unwrap_or_default(); Ok((state_root, Default::default())) @@ -615,16 +627,24 @@ impl StorageRootProvider for MockEthProvider { impl StateProofProvider for MockEthProvider { fn proof( &self, - _hashed_state: HashedPostState, + _input: TrieInput, address: Address, _slots: &[B256], ) -> ProviderResult { Ok(AccountProof::new(address)) } + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + fn witness( &self, - _overlay: HashedPostState, + _input: TrieInput, _target: HashedPostState, ) -> ProviderResult> { Ok(HashMap::default()) @@ -663,7 +683,7 @@ impl EvmEnvProvider for MockEthProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { Ok(()) } @@ -676,7 +696,7 @@ impl EvmEnvProvider for MockEthProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } @@ -688,7 +708,7 @@ impl EvmEnvProvider for MockEthProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv
<Header = Header>, { Ok(()) } @@ -700,7 +720,7 @@ impl EvmEnvProvider for MockEthProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } @@ -799,3 +819,18 @@ impl ChangeSetReader for MockEthProvider { Ok(Vec::default()) } } + +impl BlockExecutionReader for MockEthProvider { + fn get_block_and_execution_range( + &self, + _range: RangeInclusive<BlockNumber>, + ) -> ProviderResult<Chain> { + Ok(Chain::default()) + } +} + +impl StateReader for MockEthProvider { + fn get_state(&self, _block: BlockNumber) -> ProviderResult<Option<ExecutionOutcome>> { + Ok(None) + } +} diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 8e3146633f..2200781096 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,11 +1,13 @@ use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, - Database, DatabaseEnv, + DatabaseEnv, }; use reth_errors::ProviderResult; -use reth_primitives::{Account, StorageEntry, B256}; +use reth_node_types::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_primitives::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; use std::sync::Arc; @@ -18,15 +20,26 @@ pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; +/// Mock [`reth_node_types::NodeTypes`] for testing. +pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< + (), + reth_ethereum_engine_primitives::EthEngineTypes, + reth_chainspec::ChainSpec, +>; + +/// Mock [`reth_node_types::NodeTypesWithDB`] for testing. +pub type MockNodeTypesWithDB<DB = TempDatabase<DatabaseEnv>> = + NodeTypesWithDBAdapter<MockNodeTypes, Arc<DB>>; + /// Creates test provider factory with mainnet chain spec. -pub fn create_test_provider_factory() -> ProviderFactory<Arc<TempDatabase<DatabaseEnv>>> { +pub fn create_test_provider_factory() -> ProviderFactory<MockNodeTypesWithDB> { create_test_provider_factory_with_chain_spec(MAINNET.clone()) } /// Creates test provider factory with provided chain spec. pub fn create_test_provider_factory_with_chain_spec( chain_spec: Arc<ChainSpec>, -) -> ProviderFactory<Arc<TempDatabase<DatabaseEnv>>> { +) -> ProviderFactory<MockNodeTypesWithDB> { let (static_dir, _) = create_test_static_files_dir(); let db = create_test_rw_db(); ProviderFactory::new( @@ -37,9 +50,9 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Inserts the genesis alloc from the provided chain spec into the trie.
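The `MockNodeTypesWithDB` alias above leans on an adapter type that binds a compile-time "types bundle" to a concrete database. A toy model of that pattern; every name here is an illustrative stand-in, not reth's actual definition:

// Toy model of the NodeTypesWithDBAdapter pattern used by the test helpers.
use std::marker::PhantomData;

trait NodeTypes {}
trait Database {}

trait NodeTypesWithDB: NodeTypes {
    type DB: Database;
}

struct NodeTypesWithDBAdapter<Types, DB>(PhantomData<(Types, DB)>);

impl<Types: NodeTypes, DB: Database> NodeTypes for NodeTypesWithDBAdapter<Types, DB> {}
impl<Types: NodeTypes, DB: Database> NodeTypesWithDB for NodeTypesWithDBAdapter<Types, DB> {
    type DB = DB;
}

// Test code fixes both parameters once behind an alias.
struct MockTypes;
impl NodeTypes for MockTypes {}
struct TempDb;
impl Database for TempDb {}

type MockNodeTypesWithDB = NodeTypesWithDBAdapter<MockTypes, TempDb>;

fn main() {
    // Generic code bounded on NodeTypesWithDB accepts the adapter directly.
    fn assert_impl<T: NodeTypesWithDB>() {}
    assert_impl::<MockNodeTypesWithDB>();
}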
-pub fn insert_genesis( - provider_factory: &ProviderFactory, - chain_spec: Arc, +pub fn insert_genesis>( + provider_factory: &ProviderFactory, + chain_spec: Arc, ) -> ProviderResult { let provider = provider_factory.provider_rw()?; diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index e4ff0adeab..f3261c930e 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,9 +1,12 @@ use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, ops::{RangeBounds, RangeInclusive}, sync::Arc, }; +use alloy_primitives::{ + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, +}; use reth_chain_state::{ CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceSubscriptions, @@ -13,19 +16,16 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, - BlockNumber, BlockNumberOrTag, BlockWithSenders, Bytecode, Bytes, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, - U256, + parlia::Snapshot, Account, BlobSidecars, Block, BlockHashOrNumber, BlockId, BlockNumberOrTag, + BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{SidecarsProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use tokio::sync::{broadcast, watch}; @@ -327,12 +327,7 @@ impl StateRootProvider for NoopProvider { Ok(B256::default()) } - fn state_root_from_nodes( - &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult { + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { Ok(B256::default()) } @@ -345,9 +340,7 @@ impl StateRootProvider for NoopProvider { fn state_root_from_nodes_with_updates( &self, - _nodes: TrieUpdates, - _hashed_state: HashedPostState, - _prefix_sets: TriePrefixSetsMut, + _input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { Ok((B256::default(), TrieUpdates::default())) } @@ -366,16 +359,24 @@ impl StorageRootProvider for NoopProvider { impl StateProofProvider for NoopProvider { fn proof( &self, - _hashed_state: HashedPostState, + _input: TrieInput, address: Address, _slots: &[B256], ) -> ProviderResult { Ok(AccountProof::new(address)) } + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + fn witness( &self, - _overlay: HashedPostState, + _input: TrieInput, _target: HashedPostState, ) -> ProviderResult> { Ok(HashMap::default()) @@ -405,7 +406,7 @@ impl EvmEnvProvider for NoopProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> 
where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } @@ -418,7 +419,7 @@ impl EvmEnvProvider for NoopProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } @@ -430,7 +431,7 @@ impl EvmEnvProvider for NoopProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } @@ -442,7 +443,7 @@ impl EvmEnvProvider for NoopProvider { _evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv, + EvmConfig: ConfigureEvmEnv<Header = Header>
, { Ok(()) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index c5623fa01a..8e3a54d86b 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,6 +1,7 @@ +use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{BlockNumber, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; @@ -22,7 +23,7 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { ) -> ProviderResult<()>; } -/// BlockExecution Writer +/// BlockExecution Reader #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockExecutionReader: BlockReader + Send + Sync { /// Get range of blocks and its execution result @@ -32,6 +33,13 @@ pub trait BlockExecutionReader: BlockReader + Send + Sync { ) -> ProviderResult; } +/// This just receives state, or [`ExecutionOutcome`], from the provider +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait StateReader: Send + Sync { + /// Get the [`ExecutionOutcome`] for the given block + fn get_state(&self, block: BlockNumber) -> ProviderResult>; +} + /// Block Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { diff --git a/crates/storage/provider/src/traits/database_provider.rs b/crates/storage/provider/src/traits/database_provider.rs deleted file mode 100644 index fab60fe2e7..0000000000 --- a/crates/storage/provider/src/traits/database_provider.rs +++ /dev/null @@ -1,9 +0,0 @@ -use crate::DatabaseProviderRO; -use reth_db_api::database::Database; -use reth_storage_errors::provider::ProviderResult; - -/// Database provider factory. -pub trait DatabaseProviderFactory { - /// Create new read-only database provider. - fn database_provider_ro(&self) -> ProviderResult>; -} diff --git a/crates/storage/provider/src/traits/finalized_block.rs b/crates/storage/provider/src/traits/finalized_block.rs index 4bf4da7980..5509db0aa9 100644 --- a/crates/storage/provider/src/traits/finalized_block.rs +++ b/crates/storage/provider/src/traits/finalized_block.rs @@ -1,5 +1,5 @@ +use alloy_primitives::BlockNumber; use reth_errors::ProviderResult; -use reth_primitives::BlockNumber; /// Functionality to read the last known finalized block from the database. pub trait FinalizedBlockReader: Send + Sync { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 522591c203..1fe996df0e 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -6,18 +6,18 @@ use crate::{ StateProviderFactory, StaticFileProviderFactory, TransactionsProvider, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; -use reth_chainspec::{ChainSpec, EthChainSpec}; -use reth_db_api::database::Database; +use reth_chainspec::ChainSpec; +use reth_node_types::NodeTypesWithDB; /// Helper trait to unify all provider traits for simplicity. 
-pub trait FullProvider: - DatabaseProviderFactory +pub trait FullProvider: + DatabaseProviderFactory + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory + EvmEnvProvider - + ChainSpecProvider + + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions @@ -30,14 +30,14 @@ pub trait FullProvider: { } -impl FullProvider for T where - T: DatabaseProviderFactory +impl FullProvider for T where + T: DatabaseProviderFactory + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory + EvmEnvProvider - + ChainSpecProvider + + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 78efcc6273..2b759afa72 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,6 +1,7 @@ +use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; use reth_db_api::models::BlockNumberAddress; -use reth_primitives::{Account, Address, BlockNumber, StorageEntry, B256}; +use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index faa02b39e9..e5da6f799d 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{BlockNumber, B256}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_primitives::{BlockHashOrNumber, BlockNumber, SealedHeader, B256}; +use reth_primitives::{BlockHashOrNumber, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use tokio::sync::watch; diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index 726fe0cb78..cbf9bece4b 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,11 +1,8 @@ +use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; use reth_db_api::models::BlockNumberAddress; -use reth_primitives::{Address, BlockNumber, B256}; use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::BTreeMap, - ops::{Range, RangeInclusive}, -}; +use std::ops::{Range, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] @@ -21,7 +18,7 @@ pub trait HistoryWriter: Send + Sync { /// Insert account change index to database. Used inside AccountHistoryIndex stage fn insert_account_history_index( &self, - account_transitions: BTreeMap>, + index_updates: impl IntoIterator)>, ) -> ProviderResult<()>; /// Unwind and clear storage history indices. @@ -35,7 +32,7 @@ pub trait HistoryWriter: Send + Sync { /// Insert storage change index to database. Used inside StorageHistoryIndex stage fn insert_storage_history_index( &self, - storage_transitions: BTreeMap<(Address, B256), Vec>, + storage_transitions: impl IntoIterator)>, ) -> ProviderResult<()>; /// Read account/storage changesets and update account/storage history indices. 
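The `HistoryWriter` hunk above relaxes both index-insertion methods from concrete `BTreeMap` parameters to `impl IntoIterator` bounds, so callers can stream shard updates instead of collecting them into an ordered map first. A minimal caller sketch, assuming `HistoryWriter` is re-exported from `reth_provider` and that the elided item type is `(Address, Vec<BlockNumber>)`, mirroring the old `BTreeMap` parameter; the function name is illustrative only:

```rust
use alloy_primitives::{Address, BlockNumber};
use reth_provider::HistoryWriter;
use reth_storage_errors::provider::ProviderResult;

/// Feeds only non-empty account index updates to the writer.
fn index_nonempty_accounts<W: HistoryWriter>(
    writer: &W,
    changes: Vec<(Address, Vec<BlockNumber>)>,
) -> ProviderResult<()> {
    // With `impl IntoIterator`, the updates can be filtered and passed lazily;
    // no intermediate `BTreeMap` allocation is required.
    writer.insert_account_history_index(
        changes.into_iter().filter(|(_, blocks)| !blocks.is_empty()),
    )
}
```

The same pattern applies to `insert_storage_history_index`, whose items pair `(Address, B256)` keys with block-number lists.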
diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 3a12e72bd7..86e65527f4 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -29,9 +29,6 @@ pub use trie::{StorageTrieWriter, TrieWriter}; mod history; pub use history::HistoryWriter; -mod database_provider; -pub use database_provider::DatabaseProviderFactory; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 8c68c2acdf..14546442a2 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,5 +1,5 @@ +use alloy_primitives::BlockNumber; use reth_execution_types::ExecutionOutcome; -use reth_primitives::BlockNumber; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostStateSorted; use revm::db::{ diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs index 960af93c85..2edb4e072d 100644 --- a/crates/storage/provider/src/traits/trie.rs +++ b/crates/storage/provider/src/traits/trie.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; +use alloy_primitives::B256; use auto_impl::auto_impl; -use reth_primitives::B256; use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::{StorageTrieUpdates, TrieUpdates}; diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs index 86da1d6eaa..3ae42b4bf1 100644 --- a/crates/storage/provider/src/writer/database.rs +++ b/crates/storage/provider/src/writer/database.rs @@ -1,9 +1,10 @@ +use alloy_primitives::{BlockNumber, TxNumber}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, }; use reth_errors::ProviderResult; -use reth_primitives::{BlockNumber, Receipt, TxNumber}; +use reth_primitives::Receipt; use reth_storage_api::ReceiptWriter; pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W); @@ -18,11 +19,9 @@ where _: BlockNumber, receipts: Vec>, ) -> ProviderResult<()> { - if !receipts.is_empty() { - for (tx_idx, receipt) in receipts.into_iter().enumerate() { - if let Some(receipt) = receipt { - self.0.append(first_tx_index + tx_idx as u64, receipt)?; - } + for (tx_idx, receipt) in receipts.into_iter().enumerate() { + if let Some(receipt) = receipt { + self.0.append(first_tx_index + tx_idx as u64, receipt)?; } } Ok(()) diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 1bcbd90817..9d764620bf 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,26 +1,24 @@ use crate::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, writer::static_file::StaticFileWriter, - BlockExecutionWriter, BlockWriter, DatabaseProvider, DatabaseProviderRW, HistoryWriter, - StateChangeWriter, StateWriter, TrieWriter, + BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter, }; +use alloy_primitives::{BlockNumber, B256, U256}; use reth_chain_state::ExecutedBlock; use reth_db::{ cursor::DbCursorRO, models::CompactU256, tables, transaction::{DbTx, DbTxMut}, - Database, }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - parlia::Snapshot, BlobSidecars, BlockNumber, Header, SealedBlock, StaticFileSegment, - TransactionSignedNoHash, B256, U256, + 
parlia::Snapshot, BlobSidecars, Header, SealedBlock, StaticFileSegment, TransactionSignedNoHash, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - BlockNumReader, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, + DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, }; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; @@ -40,29 +38,38 @@ enum StorageType { /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database /// and static file providers. #[derive(Debug)] -pub struct UnifiedStorageWriter<'a, TX, SF> { - database: &'a DatabaseProvider, - static_file: Option, +pub struct UnifiedStorageWriter<'a, ProviderDB, ProviderSF> { + database: &'a ProviderDB, + static_file: Option, } -impl<'a, TX, SF> UnifiedStorageWriter<'a, TX, SF> { +impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF> { /// Creates a new instance of [`UnifiedStorageWriter`]. /// /// # Parameters /// - `database`: An optional reference to a database provider. /// - `static_file`: An optional mutable reference to a static file instance. - pub const fn new(database: &'a DatabaseProvider, static_file: Option) -> Self { - Self { database, static_file } + pub fn new
<P>
(database: &'a P, static_file: Option<ProviderSF>) -> Self + where + P: AsRef<ProviderDB>, + { + Self { database: database.as_ref(), static_file } } /// Creates a new instance of [`UnifiedStorageWriter`] from a database provider and a static /// file instance. - pub const fn from(database: &'a DatabaseProvider, static_file: SF) -> Self { + pub fn from
<P>
(database: &'a P, static_file: ProviderSF) -> Self + where + P: AsRef<ProviderDB>, + { Self::new(database, Some(static_file)) } /// Creates a new instance of [`UnifiedStorageWriter`] from a database provider. - pub const fn from_database(database: &'a DatabaseProvider) -> Self { + pub fn from_database
<P>
(database: &'a P) -> Self + where + P: AsRef<ProviderDB>, + { Self::new(database, None) } @@ -70,7 +77,7 @@ impl<'a, TX, SF> UnifiedStorageWriter<'a, TX, SF> { /// /// # Panics /// If the database provider is not set. - const fn database(&self) -> &DatabaseProvider { + const fn database(&self) -> &ProviderDB { self.database } @@ -78,7 +85,7 @@ /// /// # Panics /// If the static file instance is not set. - fn static_file(&self) -> &SF { + fn static_file(&self) -> &ProviderSF { self.static_file.as_ref().expect("should exist") } @@ -86,7 +93,7 @@ /// /// # Panics /// If the static file instance is not set. - fn static_file_mut(&mut self) -> &mut SF { + fn static_file_mut(&mut self) -> &mut ProviderSF { self.static_file.as_mut().expect("should exist") } @@ -113,12 +120,15 @@ impl UnifiedStorageWriter<'_, (), ()> { /// start-up. /// /// NOTE: If unwinding data from storage, use `commit_unwind` instead! - pub fn commit( - database: DatabaseProviderRW, + pub fn commit
<P>
( + database: impl Into
<P>
+ AsRef
<P>
, static_file: StaticFileProvider, - ) -> ProviderResult<()> { + ) -> ProviderResult<()> + where + P: DBProvider, + { static_file.commit()?; - database.commit()?; + database.into().into_tx().commit()?; Ok(()) } @@ -130,19 +140,30 @@ impl UnifiedStorageWriter<'_, (), ()> { /// checkpoints on the next start-up. /// /// NOTE: Should only be used after unwinding data from storage! - pub fn commit_unwind( - database: DatabaseProviderRW, + pub fn commit_unwind
<P>
( + database: impl Into
<P>
+ AsRef
<P>
, static_file: StaticFileProvider, - ) -> ProviderResult<()> { - database.commit()?; + ) -> ProviderResult<()> + where + P: DBProvider, + { + database.into().into_tx().commit()?; static_file.commit()?; Ok(()) } } -impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, &'b StaticFileProvider> +impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, &'b StaticFileProvider> where - TX: DbTxMut + DbTx, + ProviderDB: DBProvider + + BlockWriter + + TransactionsProviderExt + + StateChangeWriter + + TrieWriter + + HistoryWriter + + StageCheckpointWriter + + BlockExecutionWriter + + AsRef, { /// Writes executed blocks and receipts to storage. pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { @@ -272,16 +293,23 @@ where // Get the total txs for the block range, so we have the correct number of columns for // receipts and transactions + // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block let tx_range = self .database() - .transaction_range_by_block_range(block_number..=highest_static_file_block)?; + .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; let total_txs = tx_range.end().saturating_sub(*tx_range.start()); + // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); self.database().remove_block_and_execution_range( - block_number..=self.database().last_block_number()?, + block_number + 1..=self.database().last_block_number()?, )?; + // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure + // we remove only what is ABOVE the block. + // + // i.e., if the highest static file block is 8, we want to remove above block 5 only, we + // will have three blocks to remove, which will be block 8, 7, and 6. debug!(target: "provider::storage_writer", ?block_number, "Removing static file blocks above block_number"); self.static_file() .get_writer(block_number, StaticFileSegment::Headers)? @@ -301,9 +329,9 @@ where } } -impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>> +impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> where - TX: DbTx, + ProviderDB: DBProvider + HeaderProvider, { /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant. /// @@ -435,9 +463,9 @@ where } } -impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>> +impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> where - TX: DbTxMut + DbTx, + ProviderDB: DBProvider + HeaderProvider, { /// Appends receipts block by block. /// @@ -527,9 +555,10 @@ where } } -impl<'a, 'b, TX> StateWriter for UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>> +impl<'a, 'b, ProviderDB> StateWriter + for UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> where - TX: DbTxMut + DbTx, + ProviderDB: DBProvider + StateChangeWriter + HeaderProvider, { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
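The rewrite above generalizes `UnifiedStorageWriter` from a concrete `DatabaseProvider<TX>` reference to any `ProviderDB` reachable through `AsRef`, and the `commit`/`commit_unwind` helpers now consume `impl Into<P> + AsRef<P>` where `P: DBProvider`. A sketch of the intended call shape, assuming `UnifiedStorageWriter` is re-exported at the `reth_provider` crate root and that `DatabaseProviderRW` satisfies the new `Into`/`AsRef` bounds (which this rewrite is designed around); the function is illustrative only:

```rust
use reth_errors::ProviderResult;
use reth_provider::{test_utils::create_test_provider_factory, UnifiedStorageWriter};

fn write_then_commit() -> ProviderResult<()> {
    let factory = create_test_provider_factory();
    let provider_rw = factory.provider_rw()?;

    // The writer now borrows anything that `AsRef`s to the database provider.
    let writer = UnifiedStorageWriter::from_database(&provider_rw);
    // ... stage block, state, and receipt writes through `writer` here ...
    drop(writer); // end the borrow before handing the provider to `commit`

    // Per the diff above, `commit` flushes static files first and then commits
    // the database transaction; `commit_unwind` reverses that order.
    UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())
}
```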
@@ -561,15 +590,14 @@ mod tests { use crate::{ test_utils::create_test_provider_factory, AccountReader, StorageTrieWriter, TrieWriter, }; + use alloy_primitives::{keccak256, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; - use reth_primitives::{ - keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, - }; + use reth_primitives::{Account, Address, Receipt, Receipts, StorageEntry}; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, HashedPostState, HashedStorage, StateRoot, StorageRoot, diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs index 11c46d52e9..aca226ca9b 100644 --- a/crates/storage/provider/src/writer/static_file.rs +++ b/crates/storage/provider/src/writer/static_file.rs @@ -1,6 +1,7 @@ use crate::providers::StaticFileProviderRWRefMut; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_errors::ProviderResult; -use reth_primitives::{BlockNumber, Receipt, TxNumber}; +use reth_primitives::Receipt; use reth_storage_api::ReceiptWriter; pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 474ca93a7f..ce043213c9 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-db-models.workspace = true +reth-db-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true reth-prune-types.workspace = true @@ -22,4 +23,7 @@ reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +# alloy +alloy-primitives.workspace = true + auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/account.rs b/crates/storage/storage-api/src/account.rs index 8f77894ec6..e082980311 100644 --- a/crates/storage/storage-api/src/account.rs +++ b/crates/storage/storage-api/src/account.rs @@ -1,6 +1,7 @@ +use alloy_primitives::{Address, BlockNumber}; use auto_impl::auto_impl; use reth_db_models::AccountBeforeTx; -use reth_primitives::{Account, Address, BlockNumber}; +use reth_primitives::Account; use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet}, diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 24b263a83a..3291b5cac6 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -3,10 +3,11 @@ use crate::{ RequestsProvider, SidecarsProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ - Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, + Block, BlockHashOrNumber, BlockId, BlockNumberOrTag, BlockWithSenders, Header, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, }; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; diff --git a/crates/storage/storage-api/src/block_hash.rs b/crates/storage/storage-api/src/block_hash.rs index bb6ff5a3a0..84b8c2380a 100644 --- a/crates/storage/storage-api/src/block_hash.rs +++ b/crates/storage/storage-api/src/block_hash.rs @@ -1,4 
+1,5 @@ -use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256}; +use reth_primitives::BlockHashOrNumber; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block hashes by number. diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index fbbf6fce55..b96da25ade 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -1,6 +1,7 @@ use crate::BlockHashReader; +use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::ChainInfo; -use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, B256}; +use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Client trait for getting important block numbers (such as the latest block number), converting diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs new file mode 100644 index 0000000000..6a463ed01e --- /dev/null +++ b/crates/storage/storage-api/src/database_provider.rs @@ -0,0 +1,56 @@ +use reth_db_api::{database::Database, transaction::DbTx}; +use reth_prune_types::PruneModes; +use reth_storage_errors::provider::ProviderResult; + +/// Database provider. +pub trait DBProvider: Send + Sync + Sized + 'static { + /// Underlying database transaction held by the provider. + type Tx: DbTx; + + /// Returns a reference to the underlying transaction. + fn tx_ref(&self) -> &Self::Tx; + + /// Returns a mutable reference to the underlying transaction. + fn tx_mut(&mut self) -> &mut Self::Tx; + + /// Consumes the provider and returns the underlying transaction. + fn into_tx(self) -> Self::Tx; + + /// Disables long-lived read transaction safety guarantees for leaks prevention and + /// observability improvements. + /// + /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions + /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning + /// that Reth as a node is offline and not progressing. + fn disable_long_read_transaction_safety(mut self) -> Self { + self.tx_mut().disable_long_read_transaction_safety(); + self + } + + /// Commit database transaction + fn commit(self) -> ProviderResult<bool> { + Ok(self.into_tx().commit()?) + } + + /// Returns a reference to prune modes. + fn prune_modes_ref(&self) -> &PruneModes; +} + +/// Database provider factory. +#[auto_impl::auto_impl(&, Arc)] +pub trait DatabaseProviderFactory: Send + Sync { + /// Database this factory produces providers for. + type DB: Database; + + /// Provider type returned by the factory. + type Provider: DBProvider<Tx = <Self::DB as Database>::TX>; + + /// Read-write provider type returned by the factory. + type ProviderRW: DBProvider<Tx = <Self::DB as Database>::TXMut>; + + /// Create new read-only database provider. + fn database_provider_ro(&self) -> ProviderResult<Self::Provider>; + + /// Create new read-write database provider.
+ fn database_provider_rw(&self) -> ProviderResult; +} diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 433c907710..cd7e2a49fe 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,4 +1,5 @@ -use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, U256}; +use alloy_primitives::{BlockHash, BlockNumber, U256}; +use reth_primitives::{BlockHashOrNumber, Header, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 57b0dbe0fc..ab98df1118 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -55,4 +55,7 @@ pub use withdrawals::*; mod sidecars; pub use sidecars::*; +mod database_provider; +pub use database_provider::*; + pub mod noop; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index a55371f3c3..c3c33ac379 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -1,8 +1,8 @@ //! Various noop implementations for traits. use crate::{BlockHashReader, BlockNumReader}; +use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::ChainInfo; -use reth_primitives::{BlockNumber, B256}; use reth_storage_errors::provider::ProviderResult; /// Supports various api interfaces for testing purposes. diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index ec28d81435..7329d728c3 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,7 +1,6 @@ use crate::BlockIdReader; -use reth_primitives::{ - BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, Receipt, TxHash, TxNumber, -}; +use alloy_primitives::{BlockNumber, TxHash, TxNumber}; +use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; diff --git a/crates/storage/storage-api/src/stage_checkpoint.rs b/crates/storage/storage-api/src/stage_checkpoint.rs index d59f3dfb25..90ad9eadba 100644 --- a/crates/storage/storage-api/src/stage_checkpoint.rs +++ b/crates/storage/storage-api/src/stage_checkpoint.rs @@ -1,4 +1,4 @@ -use reth_primitives::BlockNumber; +use alloy_primitives::BlockNumber; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index adf0601eb9..16ad938b40 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -2,12 +2,10 @@ use super::{ AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider, StorageRootProvider, }; +use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, - StorageValue, B256, KECCAK_EMPTY, U256, -}; +use reth_primitives::{BlockId, BlockNumHash, BlockNumberOrTag, Bytecode, KECCAK_EMPTY}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Type alias of boxed [`StateProvider`]. 
@@ -82,6 +80,15 @@ pub trait StateProvider: } } +/// Trait implemented for database providers that can be converted into a historical state provider. +pub trait TryIntoHistoricalStateProvider { + /// Returns a historical [`StateProvider`] indexed by the given historic block number. + fn try_into_history_at_block( + self, + block_number: BlockNumber, + ) -> ProviderResult; +} + /// Light wrapper that returns `StateProvider` implementations that correspond to the given /// `BlockNumber`, the latest state, or the pending state. /// @@ -140,7 +147,7 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// Note: this only looks at historical blocks, not pending blocks. fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult; - /// Returns _any_[StateProvider] with matching block hash. + /// Returns _any_ [StateProvider] with matching block hash. /// /// This will return a [StateProvider] for either a historical or pending block. fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult; diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index a45df9d72f..91d0bc8c73 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -1,4 +1,5 @@ -use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; +use alloy_primitives::{Address, BlockNumber, B256}; +use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet}, diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 0553ef787f..f21365b0d3 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,7 +1,7 @@ use crate::{BlockNumReader, BlockReader}; +use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; use reth_primitives::{ - Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, + BlockHashOrNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 5be0b161bc..e41a15e107 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,10 +1,9 @@ -use reth_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdates, AccountProof, HashedPostState, - HashedStorage, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; /// A type that can compute the state root of a given post state. #[auto_impl::auto_impl(&, Box, Arc)] @@ -21,12 +20,7 @@ pub trait StateRootProvider: Send + Sync { /// Returns the state root of the `HashedPostState` on top of the current state but re-uses the /// intermediate nodes to speed up the computation. It's up to the caller to construct the /// prefix sets and inform the provider of the trie paths that have changes. 
- fn state_root_from_nodes( - &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> ProviderResult; + fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult; /// Returns the state root of the `HashedPostState` on top of the current state with trie /// updates to be committed to the database. @@ -39,9 +33,7 @@ pub trait StateRootProvider: Send + Sync { /// See [`StateRootProvider::state_root_from_nodes`] for more info. fn state_root_from_nodes_with_updates( &self, - nodes: TrieUpdates, - hashed_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)>; } @@ -61,15 +53,23 @@ pub trait StateProofProvider: Send + Sync { /// on top of the current state. fn proof( &self, - hashed_state: HashedPostState, + input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult; + /// Generate [`MultiProof`] for target hashed account and corresponding + /// hashed storage slot keys. + fn multiproof( + &self, + input: TrieInput, + targets: HashMap>, + ) -> ProviderResult; + /// Get trie witness for provided state. fn witness( &self, - overlay: HashedPostState, + input: TrieInput, target: HashedPostState, ) -> ProviderResult>; } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 8b8ac34ddd..a0720c037a 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -24,6 +24,8 @@ reth-tasks.workspace = true revm.workspace = true # ethereum +alloy-eips.workspace = true +alloy-primitives.workspace = true alloy-rlp.workspace = true # async/futures diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 0df1337fd4..22e4576305 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -1,4 +1,5 @@ #![allow(missing_docs)] +use alloy_primitives::{hex_literal::hex, Address}; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -8,7 +9,6 @@ use proptest::{ strategy::ValueTree, test_runner::{RngAlgorithm, TestRng, TestRunner}, }; -use reth_primitives::{hex_literal::hex, Address}; use reth_transaction_pool::{ pool::{BasefeeOrd, ParkedPool, PendingPool, QueuedOrd}, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index e2aa05841f..96119a0f81 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -1,9 +1,11 @@ //! 
A simple diskstore for blobs use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::{TxHash, B256}; use alloy_rlp::{Decodable, Encodable}; use parking_lot::{Mutex, RwLock}; -use reth_primitives::{BlobTransactionSidecar, TxHash, B256}; +use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc}; use tracing::{debug, trace}; @@ -127,6 +129,31 @@ impl BlobStore for DiskFileBlobStore { self.inner.get_exact(txs) } + fn get_by_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + let mut result = vec![None; versioned_hashes.len()]; + for (_tx_hash, blob_sidecar) in self.inner.blob_cache.lock().iter() { + for (i, blob_versioned_hash) in blob_sidecar.versioned_hashes().enumerate() { + for (j, target_versioned_hash) in versioned_hashes.iter().enumerate() { + if blob_versioned_hash == *target_versioned_hash { + result[j].get_or_insert_with(|| BlobAndProofV1 { + blob: Box::new(blob_sidecar.blobs[i]), + proof: blob_sidecar.proofs[i], + }); + } + } + } + + // Return early if all blobs are found. + if result.iter().all(|blob| blob.is_some()) { + break; + } + } + Ok(result) + } + fn data_size_hint(&self) -> Option { Some(self.inner.size_tracker.data_size()) } diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 326cd987a9..15160c2c3f 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -1,8 +1,9 @@ use crate::blobstore::{ BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize, BlobTransactionSidecar, }; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::B256; use parking_lot::RwLock; -use reth_primitives::B256; use std::{collections::HashMap, sync::Arc}; /// An in-memory blob store. @@ -113,6 +114,31 @@ impl BlobStore for InMemoryBlobStore { Ok(items) } + fn get_by_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + let mut result = vec![None; versioned_hashes.len()]; + for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() { + for (i, blob_versioned_hash) in blob_sidecar.versioned_hashes().enumerate() { + for (j, target_versioned_hash) in versioned_hashes.iter().enumerate() { + if blob_versioned_hash == *target_versioned_hash { + result[j].get_or_insert_with(|| BlobAndProofV1 { + blob: Box::new(blob_sidecar.blobs[i]), + proof: blob_sidecar.proofs[i], + }); + } + } + } + + // Return early if all blobs are found. + if result.iter().all(|blob| blob.is_some()) { + break; + } + } + Ok(result) + } + fn data_size_hint(&self) -> Option { Some(self.inner.size_tracker.data_size()) } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index bba4b85336..d127b3e8e6 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,9 +1,11 @@ //! Storage for blob data of EIP4844 transactions. 
+use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::B256; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; -use reth_primitives::{BlobTransactionSidecar, B256}; +use reth_primitives::BlobTransactionSidecar; use std::{ fmt, sync::atomic::{AtomicUsize, Ordering}, @@ -64,6 +66,12 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// Returns an error if any of the blobs are not found in the blob store. fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. + fn get_by_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError>; + /// Data size of all transactions in the blob store. fn data_size_hint(&self) -> Option; diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index ef9773daba..0e99858bd6 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,5 +1,6 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; -use reth_primitives::B256; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::B256; /// A blobstore implementation that does nothing #[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)] @@ -49,6 +50,13 @@ impl BlobStore for NoopBlobStore { Err(BlobStoreError::MissingSidecar(txs[0])) } + fn get_by_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + Ok(vec![None; versioned_hashes.len()]) + } + fn data_size_hint(&self) -> Option { Some(0) } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index f121dfed2d..2383167086 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,7 +1,7 @@ //! Support for maintaining the blob pool. +use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; -use reth_primitives::{BlockNumber, B256}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index e95fef6726..b5063f5b37 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -2,7 +2,11 @@ use crate::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, PoolSize, TransactionOrigin, }; -use reth_primitives::{Address, EIP4844_TX_TYPE_ID}; +use alloy_primitives::Address; +use reth_primitives::{ + constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + EIP4844_TX_TYPE_ID, +}; use std::collections::HashSet; /// Guarantees max transactions for one sender, compatible with geth/erigon pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16; @@ -39,8 +43,12 @@ pub struct PoolConfig { pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. pub price_bumps: PriceBumpConfig, + /// Minimum base fee required by the protocol. + pub minimal_protocol_basefee: u64, + /// The max gas limit for transactions in the pool + pub gas_limit: u64, /// How to handle locally received transactions: - /// [`TransactionOrigin::Local`](crate::TransactionOrigin). + /// [`TransactionOrigin::Local`](TransactionOrigin). 
pub local_transactions_config: LocalTransactionConfig, /// Bound on number of pending transactions from `reth_network::TransactionsManager` to buffer. pub pending_tx_listener_buffer_size: usize, @@ -49,7 +57,7 @@ pub struct PoolConfig { } impl PoolConfig { - /// Returns whether or not the size and amount constraints in any sub-pools are exceeded. + /// Returns whether the size and amount constraints in any sub-pools are exceeded. #[inline] pub const fn is_exceeded(&self, pool_size: PoolSize) -> bool { self.blob_limit.is_exceeded(pool_size.blob, pool_size.blob_size) || @@ -68,6 +76,8 @@ impl Default for PoolConfig { blob_limit: Default::default(), max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bumps: Default::default(), + minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, @@ -137,7 +147,7 @@ impl Default for PriceBumpConfig { } /// Configuration options for the locally received transactions: -/// [`TransactionOrigin::Local`](crate::TransactionOrigin) +/// [`TransactionOrigin::Local`](TransactionOrigin) #[derive(Debug, Clone, Eq, PartialEq)] pub struct LocalTransactionConfig { /// Apply no exemptions to the locally received transactions. diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 606b50559f..e5142e18a0 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,6 +1,7 @@ //! Transaction pool errors -use reth_primitives::{Address, BlobTransactionValidationError, InvalidTransactionError, TxHash}; +use alloy_primitives::{Address, TxHash, U256}; +use reth_primitives::{BlobTransactionValidationError, InvalidTransactionError}; /// Transaction pool result type. pub type PoolResult = Result; @@ -104,7 +105,7 @@ impl PoolError { } PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(_) => { // fee cap of the tx below the technical minimum determined by the protocol, see - // [MINIMUM_PROTOCOL_FEE_CAP](reth_primitives::constants::MIN_PROTOCOL_BASE_FEE) + // [MINIMUM_PROTOCOL_FEE_CAP](alloy_primitives::constants::MIN_PROTOCOL_BASE_FEE) // although this transaction will always be invalid, we do not want to penalize the // sender because this check simply could not be implemented by the client false @@ -168,6 +169,15 @@ pub enum Eip4844PoolTransactionError { Eip4844NonceGap, } +/// Represents all errors that can happen when validating transactions for the pool for EIP-7702 +/// transactions +#[derive(Debug, thiserror::Error)] +pub enum Eip7702PoolTransactionError { + /// Thrown if the transaction has no items in its authorization list + #[error("no items in authorization list for EIP7702 transaction")] + MissingEip7702AuthorizationList, +} + /// Represents errors that can happen when validating transactions for the pool /// /// See [`TransactionValidator`](crate::TransactionValidator). @@ -193,11 +203,19 @@ pub enum InvalidPoolTransactionError { #[error("transaction underpriced")] Underpriced, /// Thrown if the transaction's would require an account to be overdrawn - #[error("transaction overdraws from account")] - Overdraft, - /// Eip-4844 related errors + #[error("transaction overdraws from account, balance: {balance}, cost: {cost}")] + Overdraft { + /// Cost transaction is allowed to consume. See `reth_transaction_pool::PoolTransaction`. + cost: U256, + /// Balance of account. 
+ balance: U256, + }, + /// EIP-4844 related errors #[error(transparent)] Eip4844(#[from] Eip4844PoolTransactionError), + /// EIP-7702 related errors + #[error(transparent)] + Eip7702(#[from] Eip7702PoolTransactionError), /// Any other error that occurred while inserting/validating that is transaction specific #[error(transparent)] Other(Box), @@ -225,7 +243,7 @@ impl InvalidPoolTransactionError { // intentionally caused by the sender match err { InvalidTransactionError::InsufficientFunds { .. } | - InvalidTransactionError::NonceNotConsistent => { + InvalidTransactionError::NonceNotConsistent { .. } => { // transaction could just have arrived late/early false } @@ -261,7 +279,7 @@ impl InvalidPoolTransactionError { false } Self::IntrinsicGasTooLow => true, - Self::Overdraft => false, + Self::Overdraft { .. } => false, Self::Other(err) => err.is_bad_transaction(), Self::Eip4844(eip4844_err) => { match eip4844_err { @@ -289,12 +307,15 @@ impl InvalidPoolTransactionError { } } } + Self::Eip7702(eip7702_err) => match eip7702_err { + Eip7702PoolTransactionError::MissingEip7702AuthorizationList => false, + }, } } /// Returns `true` if an import failed due to nonce gap. pub const fn is_nonce_gap(&self) -> bool { - matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent)) || + matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent { .. })) || matches!(self, Self::Eip4844(Eip4844PoolTransactionError::Eip4844NonceGap)) } } diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 37a3450e06..97d4bda8d0 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,5 +1,5 @@ //! Identifier types for transactions and senders. -use reth_primitives::Address; +use alloy_primitives::Address; use rustc_hash::FxHashMap; use std::collections::HashMap; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index e6997010b5..2af1a025c7 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -79,12 +79,12 @@ //! Listen for new transactions and print them: //! //! ``` -//! use reth_chainspec::{MAINNET, ChainSpecProvider}; -//! use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; +//! use reth_chainspec::MAINNET; +//! use reth_storage_api::StateProviderFactory; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; //! use reth_transaction_pool::blobstore::InMemoryBlobStore; -//! async fn t(client: C) where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static{ +//! async fn t(client: C) where C: StateProviderFactory + Clone + 'static{ //! let blob_store = InMemoryBlobStore::default(); //! let pool = Pool::eth_pool( //! 
TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default()), @@ -151,10 +151,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{identifier::TransactionId, pool::PoolInner}; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::{Address, BlobTransactionSidecar, PooledTransactionsElement, TxHash, U256}; +use reth_primitives::{BlobTransactionSidecar, PooledTransactionsElement}; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -278,7 +280,7 @@ where impl EthTransactionPool where - Client: StateProviderFactory + reth_storage_api::BlockReaderIdExt + Clone + 'static, + Client: StateProviderFactory + Clone + 'static, S: BlobStore, { /// Returns a new [`Pool`] that uses the default [`TransactionValidationTaskExecutor`] when @@ -288,12 +290,12 @@ where /// /// ``` /// use reth_chainspec::MAINNET; - /// use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; + /// use reth_storage_api::StateProviderFactory; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::{ /// blobstore::InMemoryBlobStore, Pool, TransactionValidationTaskExecutor, /// }; - /// # fn t(client: C) where C: StateProviderFactory + BlockReaderIdExt + Clone + 'static { + /// # fn t(client: C) where C: StateProviderFactory + Clone + 'static { /// let blob_store = InMemoryBlobStore::default(); /// let pool = Pool::eth_pool( /// TransactionValidationTaskExecutor::eth( @@ -525,6 +527,13 @@ where self.pool.blob_store().get_exact(tx_hashes) } + fn get_blobs_for_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + self.pool.blob_store().get_by_versioned_hashes(versioned_hashes) + } + /// Returns all pending transactions filtered by [`TransactionOrigin`] fn get_pending_transactions_by_origin( &self, diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index e9db4fb4c8..255d81251a 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -7,6 +7,7 @@ use crate::{ traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, }; +use alloy_primitives::{Address, BlockHash, BlockNumber}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -16,8 +17,8 @@ use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - Address, BlockHash, BlockNumber, BlockNumberOrTag, IntoRecoveredTransaction, - PooledTransactionsElementEcRecovered, TransactionSigned, + BlockNumberOrTag, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + TransactionSigned, }; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -678,9 +679,10 @@ mod tests { blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin, }; + use alloy_primitives::{hex, U256}; use reth_chainspec::MAINNET; use reth_fs_util as fs; - use reth_primitives::{hex, PooledTransactionsElement, U256}; + use reth_primitives::PooledTransactionsElement; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use 
reth_tasks::TaskManager; diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 4b66542694..9ca639427b 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,8 +16,10 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; +use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -218,6 +220,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pending_transactions_by_origin( + &self, + _origin: TransactionOrigin, + ) -> Vec>> { + vec![] + } + fn unique_senders(&self) -> HashSet
<Address>
{ Default::default() } @@ -243,11 +252,11 @@ impl TransactionPool for NoopTransactionPool { Err(BlobStoreError::MissingSidecar(tx_hashes[0])) } - fn get_pending_transactions_by_origin( + fn get_blobs_for_versioned_hashes( &self, - _origin: TransactionOrigin, - ) -> Vec>> { - vec![] + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + Ok(vec![None; versioned_hashes.len()]) } } diff --git a/crates/transaction-pool/src/ordering.rs b/crates/transaction-pool/src/ordering.rs index 15b5accd9d..3381bb0279 100644 --- a/crates/transaction-pool/src/ordering.rs +++ b/crates/transaction-pool/src/ordering.rs @@ -1,5 +1,6 @@ use crate::traits::PoolTransaction; -use reth_primitives::{PooledTransactionsElementEcRecovered, TransactionSignedEcRecovered, U256}; +use alloy_primitives::U256; +use reth_primitives::{PooledTransactionsElementEcRecovered, TransactionSignedEcRecovered}; use std::{fmt, marker::PhantomData}; /// Priority of the transaction that can be missing. diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 2bad18646a..5880a73f51 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -2,8 +2,8 @@ use crate::{ identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; +use alloy_primitives::B256 as TxHash; use core::fmt; -use reth_primitives::B256 as TxHash; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, sync::Arc, @@ -268,7 +268,7 @@ mod tests { test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, Priority, }; - use reth_primitives::U256; + use alloy_primitives::U256; #[test] fn test_best_iter() { diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 7b17dcec50..96d1e1849f 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -1,5 +1,5 @@ use crate::{traits::PropagateKind, PoolTransaction, ValidPoolTransaction}; -use reth_primitives::{TxHash, B256}; +use alloy_primitives::{TxHash, B256}; use std::sync::Arc; #[cfg(feature = "serde")] diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index b9d6c46b5f..cf6c980f59 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -5,8 +5,8 @@ use crate::{ traits::PropagateKind, PoolTransaction, ValidPoolTransaction, }; +use alloy_primitives::{TxHash, B256}; use futures_util::Stream; -use reth_primitives::{TxHash, B256}; use std::{ collections::{hash_map::Entry, HashMap}, pin::Pin, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index fefe9ff6bb..cfe38ea31d 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -80,13 +80,15 @@ use crate::{ validate::{TransactionValidationOutcome, ValidPoolTransaction}, CanonicalStateUpdate, PoolConfig, TransactionOrdering, TransactionValidator, }; +use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; use parking_lot::{Mutex, RwLock, RwLockReadGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; + use reth_primitives::{ - Address, BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, - PooledTransactionsElement, TransactionSigned, TxHash, B256, + BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, PooledTransactionsElement, + 
TransactionSigned, }; use std::{ collections::{HashMap, HashSet}, diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 8ee7e13eca..b591fdb539 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -520,7 +520,8 @@ impl Ord for QueuedOrd { mod tests { use super::*; use crate::test_utils::{MockTransaction, MockTransactionFactory, MockTransactionSet}; - use reth_primitives::{address, TxType}; + use alloy_primitives::address; + use reth_primitives::TxType; use std::collections::HashSet; #[test] diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 1e1ee63134..ff3ecf65a4 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -597,7 +597,8 @@ mod tests { test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet}, PoolTransaction, }; - use reth_primitives::{address, TxType}; + use alloy_primitives::address; + use reth_primitives::TxType; use std::collections::HashSet; #[test] diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index f29d220fe4..a20ff46993 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,11 +18,9 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use reth_primitives::{ - constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, - }, - Address, TxHash, B256, +use alloy_primitives::{Address, TxHash, B256}; +use reth_primitives::constants::{ + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, }; use rustc_hash::FxHashMap; use smallvec::SmallVec; @@ -555,7 +553,10 @@ impl TxPool { )), InsertErr::Overdraft { transaction } => Err(PoolError::new( *transaction.hash(), - PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft), + PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft { + cost: transaction.cost(), + balance: on_chain_balance, + }), )), InsertErr::TxTypeConflict { transaction } => Err(PoolError::new( *transaction.hash(), @@ -947,6 +948,8 @@ impl AllTransactions { max_account_slots: config.max_account_slots, price_bumps: config.price_bumps, local_transactions_config: config.local_transactions_config.clone(), + minimal_protocol_basefee: config.minimal_protocol_basefee, + block_gas_limit: config.gas_limit, ..Default::default() } } @@ -1848,7 +1851,8 @@ impl SenderInfo { #[cfg(test)] mod tests { - use reth_primitives::{address, TxType}; + use alloy_primitives::address; + use reth_primitives::TxType; use super::*; use crate::{ diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index 12221b7283..a5cce8291f 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -1,7 +1,7 @@ //! Support types for updating the pool. 
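Review note on the `InsertErr::Overdraft` hunk in `crates/transaction-pool/src/pool/txpool.rs` above: the error now carries the offending `cost` and the on-chain `balance`, so callers no longer need pool-internal context to explain a rejection. A minimal sketch of consuming the richer variant, assuming the error types keep their current `reth_transaction_pool::error` paths:

```rust
use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError, PoolErrorKind};

/// Render an insertion failure; `cost` and `balance` now travel with the
/// overdraft error instead of being reconstructed at the call site.
fn describe_pool_error(err: &PoolError) -> String {
    match &err.kind {
        PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft {
            cost,
            balance,
        }) => {
            format!("tx {}: cost {cost} exceeds balance {balance}", err.hash)
        }
        other => format!("tx {}: {other}", err.hash),
    }
}
```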
use crate::{identifier::TransactionId, pool::state::SubPool}; -use reth_primitives::TxHash; +use alloy_primitives::TxHash; /// A change of the transaction's location /// diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 81314caa1b..50182b0ddc 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,9 +1,10 @@ use crate::EthPooledTransaction; +use alloy_primitives::{Address, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, - TransactionSigned, TxEip1559, TxEip4844, TxKind, TxLegacy, B256, U256, + constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Bytes, Transaction, + TransactionSigned, TxEip1559, TxEip4844, TxKind, TxLegacy, }; /// A generator for transactions for testing purposes. @@ -143,7 +144,7 @@ impl TransactionBuilder { TxLegacy { chain_id: Some(self.chain_id), nonce: self.nonce, - gas_limit: self.gas_limit, + gas_limit: self.gas_limit.into(), gas_price: self.max_fee_per_gas, to: self.to, value: self.value, @@ -160,7 +161,7 @@ impl TransactionBuilder { TxEip1559 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit, + gas_limit: self.gas_limit.into(), max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, to: self.to, @@ -178,10 +179,9 @@ impl TransactionBuilder { TxEip4844 { chain_id: self.chain_id, nonce: self.nonce, - gas_limit: self.gas_limit, + gas_limit: self.gas_limit as u128, max_fee_per_gas: self.max_fee_per_gas, max_priority_fee_per_gas: self.max_priority_fee_per_gas, - placeholder: None, to: match self.to { TxKind::Call(to) => to, TxKind::Create => Address::default(), diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index e7679eff88..1ea9638700 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -7,6 +7,7 @@ use crate::{ CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, ValidPoolTransaction, }; +use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, @@ -15,12 +16,12 @@ use rand::{ use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, + AccessList, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, - TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, - U256, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; + use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. @@ -186,8 +187,6 @@ pub enum MockTransaction { max_fee_per_blob_gas: u128, /// The gas limit for the transaction. gas_limit: u64, - /// Placeholder for backwards compatibility. - placeholder: Option<()>, /// The transaction's destination. 
to: Address, /// The value of the transaction. @@ -278,7 +277,6 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - placeholder: Some(()), to: Address::random(), value: Default::default(), input: Bytes::new(), @@ -798,7 +796,7 @@ impl TryFrom for MockTransaction { sender, nonce, gas_price, - gas_limit, + gas_limit: gas_limit as u64, to, value, input, @@ -819,7 +817,7 @@ impl TryFrom for MockTransaction { sender, nonce, gas_price, - gas_limit, + gas_limit: gas_limit as u64, to, value, input, @@ -843,7 +841,7 @@ impl TryFrom for MockTransaction { nonce, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit, + gas_limit: gas_limit as u64, to, value, input, @@ -856,7 +854,6 @@ impl TryFrom for MockTransaction { gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - placeholder, to, value, input, @@ -871,8 +868,7 @@ impl TryFrom for MockTransaction { max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas, - gas_limit, - placeholder, + gas_limit: gas_limit as u64, to, value, input, @@ -919,7 +915,15 @@ impl From for Transaction { value, input, size: _, - } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), + } => Self::Legacy(TxLegacy { + chain_id, + nonce, + gas_price, + gas_limit: gas_limit.into(), + to, + value, + input, + }), MockTransaction::Eip2930 { chain_id, hash: _, @@ -936,7 +940,7 @@ impl From for Transaction { chain_id, nonce, gas_price, - gas_limit, + gas_limit: gas_limit.into(), to, value, access_list, @@ -958,7 +962,7 @@ impl From for Transaction { } => Self::Eip1559(TxEip1559 { chain_id, nonce, - gas_limit, + gas_limit: gas_limit.into(), max_fee_per_gas, max_priority_fee_per_gas, to, @@ -981,14 +985,12 @@ impl From for Transaction { input, sidecar, size: _, - placeholder, } => Self::Eip4844(TxEip4844 { chain_id, nonce, - gas_limit, + gas_limit: gas_limit.into(), max_fee_per_gas, max_priority_fee_per_gas, - placeholder, to, value, access_list, @@ -1023,7 +1025,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { hash: tx_hash, nonce: *nonce, gas_price: *gas_price, - gas_limit: *gas_limit, + gas_limit: *gas_limit as u64, to: *to, value: *value, input: input.clone(), @@ -1045,7 +1047,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { hash: tx_hash, nonce: *nonce, gas_price: *gas_price, - gas_limit: *gas_limit, + gas_limit: *gas_limit as u64, to: *to, value: *value, input: input.clone(), @@ -1069,7 +1071,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { nonce: *nonce, max_fee_per_gas: *max_fee_per_gas, max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: *gas_limit, + gas_limit: *gas_limit as u64, to: *to, value: *value, input: input.clone(), @@ -1088,7 +1090,6 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { max_fee_per_blob_gas, access_list, blob_versioned_hashes: _, - placeholder, }) => Self::Eip4844 { chain_id: *chain_id, sender, @@ -1097,8 +1098,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { max_fee_per_gas: *max_fee_per_gas, max_priority_fee_per_gas: *max_priority_fee_per_gas, max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: *gas_limit, - placeholder: *placeholder, + gas_limit: *gas_limit as u64, to: *to, value: *value, input: input.clone(), diff --git a/crates/transaction-pool/src/test_utils/pool.rs b/crates/transaction-pool/src/test_utils/pool.rs index e8745a9460..4f4d7b4598 100644 --- a/crates/transaction-pool/src/test_utils/pool.rs +++ 
b/crates/transaction-pool/src/test_utils/pool.rs @@ -7,8 +8,8 @@ use crate::{ test_utils::{MockOrdering, MockTransactionDistribution, MockTransactionFactory}, TransactionOrdering, }; +use alloy_primitives::{Address, U256}; use rand::Rng; -use reth_primitives::{Address, U256}; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0dd6451848..0f4ca5d96c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,14 +7,16 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; +use alloy_eips::eip4844::BlobAndProofV1; +use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ - kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, + kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, U256, + EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -29,7 +31,7 @@ use std::{ ... use tokio::sync::mpsc::Receiver; /// The `PeerId` type. -pub type PeerId = reth_primitives::B512; +pub type PeerId = alloy_primitives::B512; /// General purpose abstraction of a transaction-pool. /// @@ -413,6 +415,12 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec<TxHash>, ) -> Result<Vec<BlobTransactionSidecar>, BlobStoreError>; + + /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. + fn get_blobs_for_versioned_hashes( + &self, + versioned_hashes: &[B256], + ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError>; } /// Extension for [TransactionPool] trait that allows to set the current block info. diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 1c0e1cc826..acfe46d6e8 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -3,7 +3,9 @@ use super::constants::DEFAULT_MAX_TX_INPUT_BYTES; use crate::{ blobstore::BlobStore, - error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, + error::{ + Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, + }, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, @@ -15,7 +17,7 @@ use reth_primitives::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; -use reth_storage_api::{AccountReader, BlockReaderIdExt, StateProviderFactory}; +use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ interpreter::gas::validate_initial_tx_gas, @@ -48,7 +50,7 @@ impl<Client, Tx> EthTransactionValidator<Client, Tx> { impl<Client, Tx> EthTransactionValidator<Client, Tx> where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory, Tx: EthPoolTransaction, { /// Validates a single transaction.
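The new `get_blobs_for_versioned_hashes` trait method above is the pool-side hook for `engine_getBlobsV1`-style lookups: the result is index-aligned with the request, with `None` for hashes the blob store cannot serve (the noop pool earlier in this diff returns all `None`). A small usage sketch, calling through the `TransactionPool` trait; the `BlobStoreError` import path is assumed:

```rust
use alloy_primitives::B256;
use reth_transaction_pool::{blobstore::BlobStoreError, TransactionPool};

/// Count how many of the requested versioned hashes the local blob store
/// can serve; the returned vector is index-aligned with `versioned_hashes`.
fn known_blob_count<P: TransactionPool>(
    pool: &P,
    versioned_hashes: &[B256],
) -> Result<usize, BlobStoreError> {
    let blobs = pool.get_blobs_for_versioned_hashes(versioned_hashes)?;
    debug_assert_eq!(blobs.len(), versioned_hashes.len());
    Ok(blobs.iter().filter(|blob| blob.is_some()).count())
}
```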
@@ -77,7 +79,7 @@ where impl TransactionValidator for EthTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory, Tx: EthPoolTransaction, { type Transaction = Tx; @@ -146,7 +148,7 @@ impl EthTransactionValidatorInner { impl EthTransactionValidatorInner where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory, Tx: EthPoolTransaction, { /// Validates a single transaction. @@ -274,6 +276,13 @@ where InvalidTransactionError::TxTypeNotSupported.into(), ) } + + if transaction.authorization_count() == 0 { + return TransactionValidationOutcome::Invalid( + transaction, + Eip7702PoolTransactionError::MissingEip7702AuthorizationList.into(), + ) + } } if let Err(err) = ensure_intrinsic_gas(&transaction, &self.fork_tracker) { @@ -326,20 +335,47 @@ where } }; - // Signer account shouldn't have bytecode. Presence of bytecode means this is a - // smartcontract. + // Unless Prague is active, the signer account shouldn't have bytecode. + // + // If Prague is active, only EIP-7702 bytecode is allowed for the sender. + // + // Any other case means that the account is not an EOA, and should not be able to send + // transactions. if account.has_bytecode() { - return TransactionValidationOutcome::Invalid( - transaction, - InvalidTransactionError::SignerAccountHasBytecode.into(), - ) + let is_eip7702 = if self.fork_tracker.is_prague_activated() { + match self + .client + .latest() + .and_then(|state| state.bytecode_by_hash(account.get_bytecode_hash())) + { + Ok(bytecode) => bytecode.unwrap_or_default().is_eip7702(), + Err(err) => { + return TransactionValidationOutcome::Error( + *transaction.hash(), + Box::new(err), + ) + } + } + } else { + false + }; + + if !is_eip7702 { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::SignerAccountHasBytecode.into(), + ) + } } + let tx_nonce = transaction.nonce(); + // Checks for nonce - if transaction.nonce() < account.nonce { + if tx_nonce < account.nonce { return TransactionValidationOutcome::Invalid( transaction, - InvalidTransactionError::NonceNotConsistent.into(), + InvalidTransactionError::NonceNotConsistent { tx: tx_nonce, state: account.nonce } + .into(), ) } @@ -797,8 +833,9 @@ mod tests { blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; + use alloy_primitives::{hex, U256}; use reth_chainspec::MAINNET; - use reth_primitives::{hex, PooledTransactionsElement, U256}; + use reth_primitives::PooledTransactionsElement; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; fn get_transaction() -> EthPooledTransaction { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index c1f6066a06..7baa5e3f33 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -5,11 +5,11 @@ use crate::{ identifier::{SenderId, TransactionId}, traits::{PoolTransaction, TransactionOrigin}, }; +use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; use reth_primitives::{ - Address, BlobTransactionSidecar, IntoRecoveredTransaction, - PooledTransactionsElementEcRecovered, SealedBlock, TransactionSignedEcRecovered, TxHash, B256, - U256, + BlobTransactionSidecar, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + SealedBlock, TransactionSignedEcRecovered, }; use std::{fmt, future::Future, time::Instant}; diff --git 
a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index df05b0a446..8bb592cf6a 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -9,7 +9,6 @@ use crate::{ use futures_util::{lock::Mutex, StreamExt}; use reth_chainspec::ChainSpec; use reth_primitives::SealedBlock; -use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -111,10 +110,7 @@ impl<V> TransactionValidationTaskExecutor<V> { } } -impl<Client> TransactionValidationTaskExecutor<EthTransactionValidator<Client, EthPooledTransaction>> -where - Client: BlockReaderIdExt, -{ +impl<Client> TransactionValidationTaskExecutor<EthTransactionValidator<Client, EthPooledTransaction>> { /// Creates a new instance for the given [`ChainSpec`] /// /// This will spawn a single validation task that performs the actual validation. diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index 2491a314ac..c55d52309d 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,7 +1,8 @@ //! Transaction pool eviction tests. +use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::{constants::MIN_PROTOCOL_BASE_FEE, Address, B256}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 1d11d335d1..0bd28140f4 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -37,8 +37,6 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -test-fuzz.workspace = true -toml.workspace = true hash-db = "=0.15.2" plain_hasher = "0.2" diff --git a/crates/trie/common/src/account.rs b/crates/trie/common/src/account.rs index 2692026011..0808837063 100644 --- a/crates/trie/common/src/account.rs +++ b/crates/trie/common/src/account.rs @@ -71,3 +71,114 @@ impl From<(AccountInfo, B256)> for TrieAccount { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::Bytes; + use std::collections::BTreeMap; + + #[test] + fn test_from_genesis_account_with_default_values() { + let genesis_account = GenesisAccount::default(); + + // Convert the GenesisAccount to a TrieAccount + let trie_account: TrieAccount = genesis_account.into(); + + // Check the fields are properly set.
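Back on the `validate/eth.rs` hunks above: EIP-7702 transactions must now ship a non-empty authorization list, and once Prague is active a sender whose account has bytecode is still accepted if that bytecode is an EIP-7702 delegation designator (any other bytecode keeps disqualifying the sender as an EOA). For reference, a standalone sketch of the shape that revm's `is_eip7702` check accepts; the helper name here is ours:

```rust
/// An EIP-7702 delegated account's code is exactly 23 bytes:
/// the magic `0xef01`, the version `0x00`, then the 20-byte delegate address.
fn is_eip7702_designator(code: &[u8]) -> bool {
    code.len() == 23 && code.starts_with(&[0xef, 0x01, 0x00])
}

#[test]
fn designator_shape() {
    let mut code = vec![0xef, 0x01, 0x00];
    code.extend_from_slice(&[0x11; 20]); // delegate address
    assert!(is_eip7702_designator(&code));
    assert!(!is_eip7702_designator(&[0x60, 0x61])); // ordinary bytecode
}
```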
+ assert_eq!(trie_account.nonce, 0); + assert_eq!(trie_account.balance, U256::default()); + assert_eq!(trie_account.storage_root(), EMPTY_ROOT_HASH); + assert_eq!(trie_account.code_hash, KECCAK_EMPTY); + + // Check that the default Account converts to the same TrieAccount + assert_eq!(Into::::into((Account::default(), EMPTY_ROOT_HASH)), trie_account); + + // Check that the default AccountInfo converts to the same TrieAccount + assert_eq!( + Into::::into((AccountInfo::default(), EMPTY_ROOT_HASH)), + trie_account + ); + } + + #[test] + fn test_from_genesis_account_with_values() { + // Create a GenesisAccount with specific values + let mut storage = BTreeMap::new(); + storage.insert(B256::from([0x01; 32]), B256::from([0x02; 32])); + + let genesis_account = GenesisAccount { + nonce: Some(10), + balance: U256::from(1000), + code: Some(Bytes::from(vec![0x60, 0x61])), + storage: Some(storage), + private_key: None, + }; + + // Convert the GenesisAccount to a TrieAccount + let trie_account: TrieAccount = genesis_account.into(); + + let expected_storage_root = storage_root_unhashed(BTreeMap::from([( + B256::from([0x01; 32]), + U256::from_be_bytes(*B256::from([0x02; 32])), + )])); + + // Check that the fields are properly set. + assert_eq!(trie_account.nonce, 10); + assert_eq!(trie_account.balance, U256::from(1000)); + assert_eq!(trie_account.storage_root(), expected_storage_root); + assert_eq!(trie_account.code_hash, keccak256([0x60, 0x61])); + + // Check that the Account converts to the same TrieAccount + assert_eq!( + Into::::into(( + Account { + nonce: 10, + balance: U256::from(1000), + bytecode_hash: Some(keccak256([0x60, 0x61])) + }, + expected_storage_root + )), + trie_account + ); + + // Check that the AccountInfo converts to the same TrieAccount + assert_eq!( + Into::::into(( + AccountInfo { + nonce: 10, + balance: U256::from(1000), + code_hash: keccak256([0x60, 0x61]), + ..Default::default() + }, + expected_storage_root + )), + trie_account + ); + } + + #[test] + fn test_from_genesis_account_with_zeroed_storage_values() { + // Create a GenesisAccount with storage containing zero values + let storage = BTreeMap::from([(B256::from([0x01; 32]), B256::from([0x00; 32]))]); + + let genesis_account = GenesisAccount { + nonce: Some(3), + balance: U256::from(300), + code: None, + storage: Some(storage), + private_key: None, + }; + + // Convert the GenesisAccount to a TrieAccount + let trie_account: TrieAccount = genesis_account.into(); + + // Check the fields are properly set. + assert_eq!(trie_account.nonce, 3); + assert_eq!(trie_account.balance, U256::from(300)); + // Zero values in storage should result in EMPTY_ROOT_HASH + assert_eq!(trie_account.storage_root(), EMPTY_ROOT_HASH); + // No code provided, so code hash should be KECCAK_EMPTY + assert_eq!(trie_account.code_hash, KECCAK_EMPTY); + } +} diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 87f9b4e4b6..df32b1cb9f 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -13,6 +13,8 @@ use serde::{Deserialize, Serialize}; use std::collections::{BTreeMap, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. +/// Multiproof is effectively a state subtrie that only contains the nodes +/// in the paths of target accounts. #[derive(Clone, Default, Debug)] pub struct MultiProof { /// State trie multiproof for requested accounts. 
diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 3779991cde..600e818ebb 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -28,6 +28,10 @@ pub fn ordered_trie_root_with_encoder<T, F>(items: &[T], mut encode: F) -> B256 where F: FnMut(&T, &mut Vec<u8>), { + if items.is_empty() { + return alloy_trie::EMPTY_ROOT_HASH; + } + let mut value_buffer = Vec::new(); let mut hb = HashBuilder::default(); diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 83ef66aabb..6d322ba3ff 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -26,6 +26,7 @@ revm.workspace = true # alloy alloy-rlp.workspace = true +alloy-primitives.workspace = true # tracing tracing.workspace = true @@ -55,6 +56,7 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-storage-errors.workspace = true reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } +reth-node-types.workspace = true # trie triehash = "0.8" @@ -70,7 +72,6 @@ tokio = { workspace = true, default-features = false, features = [ tokio-stream.workspace = true serde_json.workspace = true similar-asserts.workspace = true -criterion.workspace = true [features] metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] diff --git a/crates/trie/db/src/hashed_cursor.rs b/crates/trie/db/src/hashed_cursor.rs index 9f939fb98e..bf0341c888 100644 --- a/crates/trie/db/src/hashed_cursor.rs +++ b/crates/trie/db/src/hashed_cursor.rs @@ -1,9 +1,10 @@ +use alloy_primitives::{B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, transaction::DbTx, }; -use reth_primitives::{Account, B256, U256}; +use reth_primitives::Account; use reth_trie::hashed_cursor::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; /// A struct wrapping database transaction that implements [`HashedCursorFactory`]. diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index f55a03ab30..07b87016d2 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{keccak256, BlockNumber, B256}; use derive_more::Deref; use reth_db::tables; use reth_db_api::{ @@ -6,7 +7,7 @@ use reth_db_api::{ transaction::DbTx, DatabaseError, }; -use reth_primitives::{keccak256, BlockNumber, StorageEntry, B256}; +use reth_primitives::StorageEntry; use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets}; use reth_trie_common::Nibbles; use std::{ diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 8739dfc518..2d06d9e2f3 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -1,22 +1,33 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use alloy_primitives::{Address, B256}; use reth_db_api::transaction::DbTx; use reth_execution_errors::StateProofError; -use reth_primitives::{Address, B256}; -use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, proof::Proof, HashedPostState}; +use reth_trie::{ + hashed_cursor::HashedPostStateCursorFactory, proof::Proof, + trie_cursor::InMemoryTrieCursorFactory, MultiProof, TrieInput, +}; use reth_trie_common::AccountProof; +use std::collections::{HashMap, HashSet}; /// Extends [`Proof`] with operations specific for working with a database transaction. pub trait DatabaseProof<'a, TX> { /// Create a new [Proof] from database transaction.
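On the `root.rs` hunk above: `ordered_trie_root_with_encoder` now short-circuits an empty item slice to `EMPTY_ROOT_HASH` (keccak256 of the RLP empty string) without running the `HashBuilder`. A quick check of the behavior, assuming the function stays reachable at `reth_trie_common::root`:

```rust
use alloy_trie::EMPTY_ROOT_HASH;
use reth_trie_common::root::ordered_trie_root_with_encoder;

#[test]
fn empty_items_yield_empty_root() {
    // With no items the encoder closure never runs; the early return fires.
    let items: Vec<Vec<u8>> = Vec::new();
    let root = ordered_trie_root_with_encoder(&items, |item, buf| {
        buf.extend_from_slice(item)
    });
    assert_eq!(root, EMPTY_ROOT_HASH);
}
```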
fn from_tx(tx: &'a TX) -> Self; - /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. + /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( tx: &'a TX, - post_state: HashedPostState, + input: TrieInput, address: Address, slots: &[B256], ) -> Result<AccountProof, StateProofError>; + + /// Generates the state [`MultiProof`] for target hashed account and storage keys. + fn overlay_multiproof( + tx: &'a TX, + input: TrieInput, + targets: HashMap<B256, HashSet<B256>>, + ) -> Result<MultiProof, StateProofError>; } impl<'a, TX: DbTx> DatabaseProof<'a, TX> @@ -29,17 +40,43 @@ impl<'a, TX: DbTx> DatabaseProof<'a, TX> fn overlay_account_proof( tx: &'a TX, - post_state: HashedPostState, + input: TrieInput, address: Address, slots: &[B256], ) -> Result<AccountProof, StateProofError> { - let prefix_sets = post_state.construct_prefix_sets(); - let sorted = post_state.into_sorted(); - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted); + let nodes_sorted = input.nodes.into_sorted(); + let state_sorted = input.state.into_sorted(); Self::from_tx(tx) - .with_hashed_cursor_factory(hashed_cursor_factory) - .with_prefix_sets_mut(prefix_sets) + .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(tx), + &nodes_sorted, + )) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_sets_mut(input.prefix_sets) .account_proof(address, slots) } + + fn overlay_multiproof( + tx: &'a TX, + input: TrieInput, + targets: HashMap<B256, HashSet<B256>>, + ) -> Result<MultiProof, StateProofError> { + let nodes_sorted = input.nodes.into_sorted(); + let state_sorted = input.state.into_sorted(); + Self::from_tx(tx) + .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(tx), + &nodes_sorted, + )) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_sets_mut(input.prefix_sets) + .with_targets(targets) + .multiproof() + } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index a138ee5791..4f27679dda 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,4 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; +use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, @@ -6,12 +7,11 @@ use reth_db_api::{ transaction::DbTx, }; use reth_execution_errors::StateRootError; -use reth_primitives::{keccak256, Account, Address, BlockNumber, B256, U256}; +use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, prefix_set::TriePrefixSetsMut, - trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, HashedPostState, HashedStorage, - StateRoot, StateRootProgress, + hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, + updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput, }; use std::{ collections::{hash_map, HashMap}, @@ -111,20 +111,13 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { ) -> Result<(B256, TrieUpdates), StateRootError>; /// Calculates the state root for provided [`HashedPostState`] using cached intermediate nodes.
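On the new `overlay_multiproof` above: targets are keyed by *hashed* address, each mapping to the set of *hashed* storage slots to prove, while the `TrieInput` bundles cached nodes, overlay state, and prefix sets. A sketch of building the target map; the helper name is ours:

```rust
use std::collections::{HashMap, HashSet};

use alloy_primitives::{keccak256, Address, B256};

/// Build the `HashMap<B256, HashSet<B256>>` target map expected by
/// `overlay_multiproof`: keccak-hashed account keys to keccak-hashed slots.
fn multiproof_targets<'a>(
    accounts: impl IntoIterator<Item = (&'a Address, &'a [B256])>,
) -> HashMap<B256, HashSet<B256>> {
    accounts
        .into_iter()
        .map(|(address, slots)| {
            (keccak256(address), slots.iter().map(keccak256).collect())
        })
        .collect()
}
```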
- fn overlay_root_from_nodes( - tx: &'a TX, - intermediate_nodes: TrieUpdates, - post_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> Result; + fn overlay_root_from_nodes(tx: &'a TX, input: TrieInput) -> Result; /// Calculates the state root and trie updates for provided [`HashedPostState`] using /// cached intermediate nodes. fn overlay_root_from_nodes_with_updates( tx: &'a TX, - intermediate_nodes: TrieUpdates, - post_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + input: TrieInput, ) -> Result<(B256, TrieUpdates), StateRootError>; } @@ -199,35 +192,28 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> .root_with_updates() } - fn overlay_root_from_nodes( - tx: &'a TX, - intermediate_nodes: TrieUpdates, - post_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, - ) -> Result { - let state_sorted = post_state.into_sorted(); - let nodes_sorted = intermediate_nodes.into_sorted(); + fn overlay_root_from_nodes(tx: &'a TX, input: TrieInput) -> Result { + let state_sorted = input.state.into_sorted(); + let nodes_sorted = input.nodes.into_sorted(); StateRoot::new( InMemoryTrieCursorFactory::new(DatabaseTrieCursorFactory::new(tx), &nodes_sorted), HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), ) - .with_prefix_sets(prefix_sets.freeze()) + .with_prefix_sets(input.prefix_sets.freeze()) .root() } fn overlay_root_from_nodes_with_updates( tx: &'a TX, - intermediate_nodes: TrieUpdates, - post_state: HashedPostState, - prefix_sets: TriePrefixSetsMut, + input: TrieInput, ) -> Result<(B256, TrieUpdates), StateRootError> { - let state_sorted = post_state.into_sorted(); - let nodes_sorted = intermediate_nodes.into_sorted(); + let state_sorted = input.state.into_sorted(); + let nodes_sorted = input.nodes.into_sorted(); StateRoot::new( InMemoryTrieCursorFactory::new(DatabaseTrieCursorFactory::new(tx), &nodes_sorted), HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), ) - .with_prefix_sets(prefix_sets.freeze()) + .with_prefix_sets(input.prefix_sets.freeze()) .root_with_updates() } } diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs index 9ab2cdeda3..6a3bbe1b96 100644 --- a/crates/trie/db/src/storage.rs +++ b/crates/trie/db/src/storage.rs @@ -1,10 +1,10 @@ use std::collections::hash_map; use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use alloy_primitives::{keccak256, Address, BlockNumber, B256}; use reth_db::{cursor::DbCursorRO, models::BlockNumberAddress, tables, DatabaseError}; use reth_db_api::transaction::DbTx; use reth_execution_errors::StorageRootError; -use reth_primitives::{keccak256, Address, BlockNumber, B256}; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StorageRoot, }; diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 28a510dd44..124b8ccb20 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -1,3 +1,4 @@ +use alloy_primitives::B256; use reth_db::{ cursor::{DbCursorRW, DbDupCursorRW}, tables, @@ -6,7 +7,6 @@ use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, transaction::DbTx, }; -use reth_primitives::B256; use reth_storage_errors::db::DatabaseError; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, diff --git a/crates/trie/db/src/witness.rs b/crates/trie/db/src/witness.rs index deede885aa..62b945d26d 100644 --- a/crates/trie/db/src/witness.rs +++ b/crates/trie/db/src/witness.rs @@ -1,9 +1,10 @@ use 
crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use alloy_primitives::{Bytes, B256}; use reth_db_api::transaction::DbTx; use reth_execution_errors::TrieWitnessError; -use reth_primitives::{Bytes, B256}; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, witness::TrieWitness, HashedPostState, + hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, + witness::TrieWitness, HashedPostState, TrieInput, }; use std::collections::HashMap; @@ -12,10 +13,10 @@ pub trait DatabaseTrieWitness<'a, TX> { /// Create a new [`TrieWitness`] from database transaction. fn from_tx(tx: &'a TX) -> Self; - /// Generates trie witness for target state on top of this [`HashedPostState`]. + /// Generates trie witness for target state based on [`TrieInput`]. fn overlay_witness( tx: &'a TX, - post_state: HashedPostState, + input: TrieInput, target: HashedPostState, ) -> Result, TrieWitnessError>; } @@ -29,16 +30,21 @@ impl<'a, TX: DbTx> DatabaseTrieWitness<'a, TX> fn overlay_witness( tx: &'a TX, - post_state: HashedPostState, + input: TrieInput, target: HashedPostState, ) -> Result, TrieWitnessError> { - let prefix_sets = post_state.construct_prefix_sets(); - let sorted = post_state.into_sorted(); - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted); + let nodes_sorted = input.nodes.into_sorted(); + let state_sorted = input.state.into_sorted(); Self::from_tx(tx) - .with_hashed_cursor_factory(hashed_cursor_factory) - .with_prefix_sets_mut(prefix_sets) + .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(tx), + &nodes_sorted, + )) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_sets_mut(input.prefix_sets) .compute(target) } } diff --git a/crates/trie/db/tests/fuzz_in_memory_nodes.rs b/crates/trie/db/tests/fuzz_in_memory_nodes.rs index 32737c1980..c0bea366b9 100644 --- a/crates/trie/db/tests/fuzz_in_memory_nodes.rs +++ b/crates/trie/db/tests/fuzz_in_memory_nodes.rs @@ -1,15 +1,18 @@ +use alloy_primitives::{B256, U256}; use proptest::prelude::*; -use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; -use reth_primitives::{Account, StorageEntry, B256, U256}; +use reth_db::{ + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRW}, + tables, + transaction::DbTxMut, +}; +use reth_primitives::{Account, StorageEntry}; use reth_provider::test_utils::create_test_provider_factory; use reth_trie::{ - prefix_set::{PrefixSetMut, TriePrefixSets}, test_utils::{state_root_prehashed, storage_root_prehashed}, trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, - StateRoot, StorageRoot, + HashedPostState, HashedStorage, StateRoot, StorageRoot, }; -use reth_trie_common::Nibbles; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieCursorFactory}; use std::collections::BTreeMap; @@ -19,7 +22,7 @@ proptest! { })] #[test] - fn fuzz_in_memory_nodes(mut init_state: BTreeMap, state_updates: [BTreeMap; 10]) { + fn fuzz_in_memory_account_nodes(mut init_state: BTreeMap, state_updates: [BTreeMap>; 10]) { let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let mut hashed_account_cursor = provider.tx_ref().cursor_write::().unwrap(); @@ -35,17 +38,24 @@ proptest! 
{ .unwrap(); let mut state = init_state; - for mut state_update in state_updates { + for state_update in state_updates { // Insert state updates into database - let mut changes = PrefixSetMut::default(); - for (hashed_address, balance) in state_update.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - changes.insert(Nibbles::unpack(hashed_address)); + let mut hashed_state = HashedPostState::default(); + for (hashed_address, balance) in state_update { + if let Some(balance) = balance { + let account = Account { balance, ..Default::default() }; + hashed_account_cursor.upsert(hashed_address, account).unwrap(); + hashed_state.accounts.insert(hashed_address, Some(account)); + state.insert(hashed_address, balance); + } else { + hashed_state.accounts.insert(hashed_address, None); + state.remove(&hashed_address); + } } // Compute root with in-memory trie nodes overlay let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref()) - .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) + .with_prefix_sets(hashed_state.construct_prefix_sets().freeze()) .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(provider.tx_ref()), &trie_nodes.clone().into_sorted()) ) @@ -55,7 +65,6 @@ proptest! { trie_nodes.extend(trie_updates); // Verify the result - state.append(&mut state_update); let expected_root = state_root_prehashed( state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); @@ -64,7 +73,7 @@ proptest! { } #[test] - fn fuzz_in_memory_storage_nodes(mut init_storage: BTreeMap, storage_updates: [BTreeMap; 10]) { + fn fuzz_in_memory_storage_nodes(mut init_storage: BTreeMap, storage_updates: [(bool, BTreeMap); 10]) { let hashed_address = B256::random(); let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); @@ -83,14 +92,17 @@ proptest! { StorageRoot::from_tx_hashed(provider.tx_ref(), hashed_address).root_with_updates().unwrap(); let mut storage = init_storage; - for mut storage_update in storage_updates { + for (is_deleted, mut storage_update) in storage_updates { // Insert state updates into database - let mut changes = PrefixSetMut::default(); + if is_deleted && hashed_storage_cursor.seek_exact(hashed_address).unwrap().is_some() { + hashed_storage_cursor.delete_current_duplicates().unwrap(); + } + let mut hashed_storage = HashedStorage::new(is_deleted); for (hashed_slot, value) in storage_update.clone() { hashed_storage_cursor .upsert(hashed_address, StorageEntry { key: hashed_slot, value }) .unwrap(); - changes.insert(Nibbles::unpack(hashed_slot)); + hashed_storage.storage.insert(hashed_slot, value); } // Compute root with in-memory trie nodes overlay @@ -98,7 +110,7 @@ proptest! { trie_nodes.insert_storage_updates(hashed_address, storage_trie_nodes.clone()); let (storage_root, _, trie_updates) = StorageRoot::from_tx_hashed(provider.tx_ref(), hashed_address) - .with_prefix_set(changes.freeze()) + .with_prefix_set(hashed_storage.construct_prefix_set().freeze()) .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(provider.tx_ref()), &trie_nodes.into_sorted(), @@ -109,6 +121,9 @@ proptest! 
{ storage_trie_nodes.extend(trie_updates); // Verify the result + if is_deleted { + storage.clear(); + } storage.append(&mut storage_update); let expected_root = storage_root_prehashed(storage.clone()); assert_eq!(expected_root, storage_root); diff --git a/crates/trie/db/tests/post_state.rs b/crates/trie/db/tests/post_state.rs index 16cece3923..be022b2dae 100644 --- a/crates/trie/db/tests/post_state.rs +++ b/crates/trie/db/tests/post_state.rs @@ -1,8 +1,9 @@ +use alloy_primitives::{B256, U256}; use proptest::prelude::*; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::create_test_rw_db}; use reth_db_api::{database::Database, transaction::DbTxMut}; -use reth_primitives::{Account, StorageEntry, B256, U256}; +use reth_primitives::{Account, StorageEntry}; use reth_trie::{ hashed_cursor::{ HashedCursor, HashedCursorFactory, HashedPostStateCursorFactory, HashedStorageCursor, diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 41d599571e..8a31512a79 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -1,15 +1,10 @@ +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; -use reth_db_api::database::Database; -use reth_primitives::{ - constants::EMPTY_ROOT_HASH, keccak256, Account, Address, Bytes, StorageEntry, B256, U256, -}; -use reth_provider::{ - test_utils::create_test_provider_factory, HashingWriter, ProviderFactory, TrieWriter, -}; -use reth_storage_errors::provider::ProviderResult; -use reth_trie::{proof::Proof, Nibbles, StateRoot}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, Account}; +use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; +use reth_trie::{proof::Proof, Nibbles}; use reth_trie_common::{AccountProof, StorageProof}; -use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; +use reth_trie_db::DatabaseProof; use std::{ str::FromStr, sync::{Arc, LazyLock}, @@ -40,39 +35,6 @@ fn convert_to_proof<'a>(path: impl IntoIterator) -> Vec { path.into_iter().map(Bytes::from_str).collect::, _>>().unwrap() } -fn insert_genesis( - provider_factory: &ProviderFactory, - chain_spec: Arc, -) -> ProviderResult { - let provider = provider_factory.provider_rw()?; - - // Hash accounts and insert them into hashing table. - let genesis = chain_spec.genesis(); - let alloc_accounts = - genesis.alloc.iter().map(|(addr, account)| (*addr, Some(Account::from(account)))); - provider.insert_account_for_hashing(alloc_accounts).unwrap(); - - let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { - // Only return `Some` if there is storage. - account.storage.map(|storage| { - ( - addr, - storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }), - ) - }) - }); - provider.insert_storage_for_hashing(alloc_storage)?; - - let (root, updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .map_err(Into::::into)?; - provider.write_trie_updates(&updates).unwrap(); - - provider.commit()?; - - Ok(root) -} - #[test] fn testspec_proofs() { // Create test database and insert genesis accounts. 
diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 007ce3069d..a6983dc4ed 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,3 +1,4 @@ +use alloy_primitives::{keccak256, Address, B256, U256}; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; @@ -5,7 +6,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, transaction::DbTxMut, }; -use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, hex_literal::hex, Account, StorageEntry}; use reth_provider::{ test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, }; @@ -25,7 +26,6 @@ use std::{ use alloy_rlp::Encodable; use reth_db_api::transaction::DbTx; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; use reth_trie::{ prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index 19d5c5c32a..f018d7f1ad 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -1,6 +1,6 @@ +use alloy_primitives::B256; use reth_db::tables; use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut}; -use reth_primitives::B256; use reth_provider::test_utils::create_test_provider_factory; use reth_trie::{ prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, StorageTrieEntry, diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 92f939dd0c..e53d15c146 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -23,6 +23,7 @@ reth-provider.workspace = true # alloy alloy-rlp.workspace = true +alloy-primitives.workspace = true # tracing tracing.workspace = true diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index 2408874011..b8a4d25e58 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -1,9 +1,10 @@ #![allow(missing_docs, unreachable_pub)] +use alloy_primitives::{B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use rayon::ThreadPoolBuilder; -use reth_primitives::{Account, B256, U256}; +use reth_primitives::Account; use reth_provider::{ providers::ConsistentDbView, test_utils::create_test_provider_factory, StateChangeWriter, TrieWriter, @@ -11,6 +12,7 @@ use reth_provider::{ use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, + TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; use reth_trie_parallel::{async_root::AsyncStateRoot, parallel_root::ParallelStateRoot}; @@ -62,7 +64,12 @@ pub fn calculate_state_root(c: &mut Criterion) { // parallel root group.bench_function(BenchmarkId::new("parallel root", size), |b| { b.to_async(&runtime).iter_with_setup( - || ParallelStateRoot::new(view.clone(), updated_state.clone()), + || { + ParallelStateRoot::new( + view.clone(), + TrieInput::from_state(updated_state.clone()), + ) + }, |calculator| async { calculator.incremental_root() }, ); }); @@ -70,7 +77,13 @@ pub fn calculate_state_root(c: &mut Criterion) { // async root 
group.bench_function(BenchmarkId::new("async root", size), |b| { b.to_async(&runtime).iter_with_setup( - || AsyncStateRoot::new(view.clone(), blocking_pool.clone(), updated_state.clone()), + || { + AsyncStateRoot::new( + view.clone(), + blocking_pool.clone(), + TrieInput::from_state(updated_state.clone()), + ) + }, |calculator| calculator.incremental_root(), ); }); diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index ef206064ff..ed12accb4b 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -1,20 +1,21 @@ #[cfg(feature = "metrics")] use crate::metrics::ParallelStateRootMetrics; use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets}; +use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; -use reth_db_api::database::Database; use reth_execution_errors::StorageRootError; -use reth_primitives::B256; -use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, +}; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::TrieCursorFactory, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, HashedPostState, Nibbles, StorageRoot, TrieAccount, + HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{collections::HashMap, sync::Arc}; @@ -35,39 +36,38 @@ use tracing::*; /// /// For sync usage, take a look at `ParallelStateRoot`. #[derive(Debug)] -pub struct AsyncStateRoot<DB, Provider> { +pub struct AsyncStateRoot<Factory> { /// Consistent view of the database. - view: ConsistentDbView<DB, Provider>, + view: ConsistentDbView<Factory>, /// Blocking task pool. blocking_pool: BlockingTaskPool, - /// Changed hashed state. - hashed_state: HashedPostState, + /// Trie input. + input: TrieInput, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, } -impl<DB, Provider> AsyncStateRoot<DB, Provider> { +impl<Factory> AsyncStateRoot<Factory> { /// Create new async state root calculator. pub fn new( - view: ConsistentDbView<DB, Provider>, + view: ConsistentDbView<Factory>, blocking_pool: BlockingTaskPool, - hashed_state: HashedPostState, + input: TrieInput, ) -> Self { Self { view, blocking_pool, - hashed_state, + input, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } } } -impl<DB, Provider> AsyncStateRoot<DB, Provider> +impl<Factory> AsyncStateRoot<Factory> where - DB: Database + Clone + 'static, - Provider: DatabaseProviderFactory<DB> + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + Send + Sync + 'static, { /// Calculate incremental state root asynchronously.
pub async fn incremental_root(self) -> Result { @@ -86,12 +86,13 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), AsyncStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets().freeze(); + let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); + let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); + let prefix_sets = self.input.prefix_sets.freeze(); let storage_root_targets = StorageRootTargets::new( - self.hashed_state.accounts.keys().copied(), + prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), prefix_sets.storage_prefix_sets, ); - let hashed_state_sorted = Arc::new(self.hashed_state.into_sorted()); // Pre-calculate storage roots async for accounts which were changed. tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); @@ -102,14 +103,18 @@ where { let view = self.view.clone(); let hashed_state_sorted = hashed_state_sorted.clone(); + let trie_nodes_sorted = trie_nodes_sorted.clone(); #[cfg(feature = "metrics")] let metrics = self.metrics.storage_trie.clone(); let handle = self.blocking_pool.spawn_fifo(move || -> Result<_, AsyncStateRootError> { - let provider = view.provider_ro()?; - let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let provider_ro = view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); let hashed_state = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider.tx_ref()), + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), &hashed_state_sorted, ); Ok(StorageRoot::new_hashed( @@ -129,10 +134,12 @@ where let mut trie_updates = TrieUpdates::default(); let provider_ro = self.view.provider_ro()?; - let tx = provider_ro.tx_ref(); - let trie_cursor_factory = DatabaseTrieCursorFactory::new(tx); + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), &hashed_state_sorted, ); @@ -233,11 +240,12 @@ pub enum AsyncStateRootError { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{keccak256, Address, U256}; use rand::Rng; use rayon::ThreadPoolBuilder; - use reth_primitives::{keccak256, Account, Address, StorageEntry, U256}; + use reth_primitives::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; - use reth_trie::{test_utils, HashedStorage}; + use reth_trie::{test_utils, HashedPostState, HashedStorage}; #[tokio::test] async fn random_async_root() { @@ -290,7 +298,7 @@ mod tests { AsyncStateRoot::new( consistent_view.clone(), blocking_pool.clone(), - HashedPostState::default() + Default::default(), ) .incremental_root() .await @@ -324,10 +332,14 @@ mod tests { } assert_eq!( - AsyncStateRoot::new(consistent_view.clone(), blocking_pool.clone(), hashed_state) - .incremental_root() - .await - .unwrap(), + AsyncStateRoot::new( + consistent_view.clone(), + blocking_pool.clone(), + TrieInput::from_state(hashed_state) + ) + .incremental_root() + .await + .unwrap(), test_utils::state_root(state) ); } diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index 015d41677d..e63c3f1a17 100644 --- 
a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -3,17 +3,18 @@ use crate::metrics::ParallelStateRootMetrics; use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets}; use alloy_rlp::{BufMut, Encodable}; use rayon::prelude::*; -use reth_db_api::database::Database; use reth_execution_errors::StorageRootError; use reth_primitives::B256; -use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, +}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::TrieCursorFactory, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, HashedPostState, Nibbles, StorageRoot, TrieAccount, + HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::collections::HashMap; @@ -33,32 +34,31 @@ use tracing::*; /// /// If possible, use more optimized `AsyncStateRoot` instead. #[derive(Debug)] -pub struct ParallelStateRoot<DB, Provider> { +pub struct ParallelStateRoot<Factory> { /// Consistent view of the database. - view: ConsistentDbView<DB, Provider>, - /// Changed hashed state. - hashed_state: HashedPostState, + view: ConsistentDbView<Factory>, + /// Trie input. + input: TrieInput, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, } -impl<DB, Provider> ParallelStateRoot<DB, Provider> { +impl<Factory> ParallelStateRoot<Factory> { /// Create new parallel state root calculator. - pub fn new(view: ConsistentDbView<DB, Provider>, hashed_state: HashedPostState) -> Self { + pub fn new(view: ConsistentDbView<Factory>, input: TrieInput) -> Self { Self { view, - hashed_state, + input, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } } } -impl<DB, Provider> ParallelStateRoot<DB, Provider> +impl<Factory> ParallelStateRoot<Factory> where - DB: Database, - Provider: DatabaseProviderFactory<DB> + Send + Sync, + Factory: DatabaseProviderFactory<Provider: BlockReader> + Send + Sync, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result<B256, ParallelStateRootError> { @@ -77,12 +77,13 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets().freeze(); + let trie_nodes_sorted = self.input.nodes.into_sorted(); + let hashed_state_sorted = self.input.state.into_sorted(); + let prefix_sets = self.input.prefix_sets.freeze(); let storage_root_targets = StorageRootTargets::new( - self.hashed_state.accounts.keys().copied(), + prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), prefix_sets.storage_prefix_sets, ); - let hashed_state_sorted = self.hashed_state.into_sorted(); // Pre-calculate storage roots in parallel for accounts which were changed.
tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); @@ -91,7 +92,10 @@ where .into_par_iter() .map(|(hashed_address, prefix_set)| { let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_ro.tx_ref()); + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); let hashed_cursor_factory = HashedPostStateCursorFactory::new( DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), &hashed_state_sorted, @@ -113,11 +117,14 @@ where let mut trie_updates = TrieUpdates::default(); let provider_ro = self.view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); let hashed_cursor_factory = HashedPostStateCursorFactory::new( DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), &hashed_state_sorted, ); - let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_ro.tx_ref()); let walker = TrieWalker::new( trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, @@ -219,10 +226,11 @@ impl From for ProviderError { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::{keccak256, Address, U256}; use rand::Rng; - use reth_primitives::{keccak256, Account, Address, StorageEntry, U256}; + use reth_primitives::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; - use reth_trie::{test_utils, HashedStorage}; + use reth_trie::{test_utils, HashedPostState, HashedStorage}; #[tokio::test] async fn random_parallel_root() { @@ -270,7 +278,7 @@ mod tests { } assert_eq!( - ParallelStateRoot::new(consistent_view.clone(), HashedPostState::default()) + ParallelStateRoot::new(consistent_view.clone(), Default::default()) .incremental_root() .unwrap(), test_utils::state_root(state.clone()) @@ -302,7 +310,9 @@ mod tests { } assert_eq!( - ParallelStateRoot::new(consistent_view, hashed_state).incremental_root().unwrap(), + ParallelStateRoot::new(consistent_view, TrieInput::from_state(hashed_state)) + .incremental_root() + .unwrap(), test_utils::state_root(state) ); } diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index d34441d3be..8325fbcf72 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -1,5 +1,5 @@ +use alloy_primitives::B256; use derive_more::{Deref, DerefMut}; -use reth_primitives::B256; use reth_trie::prefix_set::PrefixSet; use std::collections::HashMap; diff --git a/crates/trie/prefetch/src/prefetch.rs b/crates/trie/prefetch/src/prefetch.rs index 1bb70c3cd7..24a4d368d9 100644 --- a/crates/trie/prefetch/src/prefetch.rs +++ b/crates/trie/prefetch/src/prefetch.rs @@ -1,8 +1,9 @@ use rayon::prelude::*; -use reth_db::database::Database; use reth_execution_errors::StorageRootError; use reth_primitives::{revm_primitives::EvmState, B256}; -use reth_provider::{providers::ConsistentDbView, ProviderError, ProviderFactory}; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, +}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, metrics::TrieRootMetrics, @@ -52,13 +53,13 @@ impl TriePrefetch { } /// Run the prefetching task. 
- pub async fn run<DB>( + pub async fn run<Factory>( &mut self, - consistent_view: Arc<ConsistentDbView<ProviderFactory<DB>>>, + consistent_view: Arc<ConsistentDbView<Factory>>, mut prefetch_rx: UnboundedReceiver<EvmState>, mut interrupt_rx: Receiver<()>, ) where - DB: Database + 'static, + Factory: DatabaseProviderFactory<Provider: BlockReader> + Send + Sync + 'static, { let mut join_set = JoinSet::new(); @@ -71,7 +72,7 @@ let self_clone = Arc::new(self.clone()); join_set.spawn(async move { - if let Err(e) = self_clone.prefetch_once::<DB>(consistent_view, hashed_state).await { + if let Err(e) = self_clone.prefetch_once(consistent_view, hashed_state).await { debug!(target: "trie::trie_prefetch", ?e, "Error while prefetching trie storage"); }; }); @@ -137,13 +138,13 @@ } /// Prefetch trie storage for the given hashed state. - pub async fn prefetch_once<DB>( + pub async fn prefetch_once<Factory>( self: Arc<Self>, - consistent_view: Arc<ConsistentDbView<ProviderFactory<DB>>>, + consistent_view: Arc<ConsistentDbView<Factory>>, hashed_state: HashedPostState, ) -> Result<(), TriePrefetchError> where - DB: Database, + Factory: DatabaseProviderFactory<Provider: BlockReader> + Send + Sync + 'static, { let mut tracker = TrieTracker::default(); diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 54e7d6e9d2..12cf9ac1cb 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -23,6 +23,7 @@ revm.workspace = true # alloy alloy-rlp.workspace = true +alloy-primitives.workspace = true # tracing tracing.workspace = true @@ -47,7 +48,6 @@ serde = { workspace = true, optional = true } # reth reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-provider = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } # trie @@ -61,9 +61,7 @@ tokio = { workspace = true, default-features = false, features = [ "rt", "macros", ] } -tokio-stream.workspace = true serde_json.workspace = true -similar-asserts.workspace = true criterion.workspace = true [features] diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 636ce44621..49759f14a9 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -1,7 +1,7 @@ #![allow(missing_docs, unreachable_pub)] +use alloy_primitives::{keccak256, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_primitives::{keccak256, Address, B256, U256}; use reth_trie::{HashedPostState, HashedStorage}; use revm::db::{states::BundleBuilder, BundleAccount}; use std::collections::HashMap; diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index 3f7efecc3a..ad16993646 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -1,8 +1,9 @@ #![allow(missing_docs, unreachable_pub)] +use alloy_primitives::B256; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; -use reth_primitives::{ReceiptWithBloom, B256}; +use reth_primitives::ReceiptWithBloom; use reth_trie::triehash::KeccakHasher; /// Benchmarks different implementations of the root calculation.
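Taken together, the two trie crates above drop the `DB: Database` generic in favor of a single `Factory: DatabaseProviderFactory` bound, so a caller now only needs a consistent view over a provider factory plus a `TrieInput` (introduced in the next files). A minimal sketch of the new call shape, patterned on the crate's own tests; the `ConsistentDbView::new(factory, tip)` constructor shape and the `test_utils` factory are assumptions here, not part of this diff:

```rust
use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory};
use reth_trie::TrieInput;
use reth_trie_parallel::parallel_root::ParallelStateRoot;

fn compute_root_example() {
    // Any DatabaseProviderFactory works; the test factory keeps this self-contained.
    let factory = create_test_provider_factory();
    // A consistent view pins every read-only provider the calculator opens to one DB view.
    let view = ConsistentDbView::new(factory, None); // assumed constructor shape
    // An empty TrieInput means no in-memory overlay: the root comes straight from the database.
    let root = ParallelStateRoot::new(view, TrieInput::default()).incremental_root().unwrap();
    println!("state root: {root}");
}
```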
diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs index b4961deae3..9b539cdce7 100644 --- a/crates/trie/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/trie/src/hashed_cursor/mod.rs @@ -1,4 +1,5 @@ -use reth_primitives::{Account, B256, U256}; +use alloy_primitives::{B256, U256}; +use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; /// Implementation of hashed state cursor traits for the post state. diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index e93795533d..53a2cdb3bb 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -3,7 +3,8 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted, }; -use reth_primitives::{Account, B256, U256}; +use alloy_primitives::{B256, U256}; +use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; use std::collections::HashSet; diff --git a/crates/trie/trie/src/input.rs b/crates/trie/trie/src/input.rs new file mode 100644 index 0000000000..18f9ada2f4 --- /dev/null +++ b/crates/trie/trie/src/input.rs @@ -0,0 +1,75 @@ +use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; + +/// Inputs for trie-related computations. +#[derive(Default, Debug)] +pub struct TrieInput { + /// The collection of cached in-memory intermediate trie nodes that + /// can be reused for computation. + pub nodes: TrieUpdates, + /// The in-memory overlay hashed state. + pub state: HashedPostState, + /// The collection of prefix sets for the computation. Since the prefix sets _always_ + /// invalidate the in-memory nodes, not all keys from `self.state` might be present here, + /// if we have cached nodes for them. + pub prefix_sets: TriePrefixSetsMut, +} + +impl TrieInput { + /// Create new trie input. + pub const fn new( + nodes: TrieUpdates, + state: HashedPostState, + prefix_sets: TriePrefixSetsMut, + ) -> Self { + Self { nodes, state, prefix_sets } + } + + /// Create new trie input from in-memory state. The prefix sets will be constructed and + /// set automatically. + pub fn from_state(state: HashedPostState) -> Self { + let prefix_sets = state.construct_prefix_sets(); + Self { nodes: TrieUpdates::default(), state, prefix_sets } + } + + /// Prepend state to the input and extend the prefix sets. + pub fn prepend(&mut self, mut state: HashedPostState) { + self.prefix_sets.extend(state.construct_prefix_sets()); + std::mem::swap(&mut self.state, &mut state); + self.state.extend(state); + } + + /// Prepend intermediate nodes and state to the input. + /// Prefix sets for incoming state will be ignored. + pub fn prepend_cached(&mut self, mut nodes: TrieUpdates, mut state: HashedPostState) { + std::mem::swap(&mut self.nodes, &mut nodes); + self.nodes.extend(nodes); + std::mem::swap(&mut self.state, &mut state); + self.state.extend(state); + } + + /// Append state to the input and extend the prefix sets. + pub fn append(&mut self, state: HashedPostState) { + self.prefix_sets.extend(state.construct_prefix_sets()); + self.state.extend(state); + } + + /// Append state to the input by reference and extend the prefix sets. + pub fn append_ref(&mut self, state: &HashedPostState) { + self.prefix_sets.extend(state.construct_prefix_sets()); + self.state.extend_ref(state); + } + + /// Append intermediate nodes and state to the input. + /// Prefix sets for incoming state will be ignored. 
+ pub fn append_cached(&mut self, nodes: TrieUpdates, state: HashedPostState) { + self.nodes.extend(nodes); + self.state.extend(state); + } + + /// Append intermediate nodes and state to the input by reference. + /// Prefix sets for incoming state will be ignored. + pub fn append_cached_ref(&mut self, nodes: &TrieUpdates, state: &HashedPostState) { + self.nodes.extend_ref(nodes); + self.state.extend_ref(state); + } +} diff --git a/crates/trie/trie/src/lib.rs index 600b630013..317ec36554 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -36,6 +36,10 @@ pub mod node_iter; mod state; pub use state::*; +/// Input for trie computation. +mod input; +pub use input::TrieInput; + /// Merkle proof generation. pub mod proof; diff --git a/crates/trie/trie/src/node_iter.rs index ef63a60d1d..feebe36e16 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -1,5 +1,5 @@ use crate::{hashed_cursor::HashedCursor, trie_cursor::TrieCursor, walker::TrieWalker, Nibbles}; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; /// Represents a branch node in the trie. diff --git a/crates/trie/trie/src/prefix_set.rs index fa14464241..4997228050 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -1,5 +1,5 @@ use crate::Nibbles; -use reth_primitives::B256; +use alloy_primitives::B256; use std::{ collections::{HashMap, HashSet}, sync::Arc, diff --git a/crates/trie/trie/src/progress.rs index 1594d7f6e0..25195b48ad 100644 --- a/crates/trie/trie/src/progress.rs +++ b/crates/trie/trie/src/progress.rs @@ -1,5 +1,5 @@ use crate::{hash_builder::HashBuilder, trie_cursor::CursorSubNode, updates::TrieUpdates}; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_stages_types::MerkleCheckpoint; /// The progress of the state root computation. diff --git a/crates/trie/trie/src/proof.rs index 5c85fd040b..8b9d2f9d09 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -6,13 +6,13 @@ use crate::{ walker::TrieWalker, HashBuilder, Nibbles, }; +use alloy_primitives::{keccak256, Address, B256}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::trie::StateProofError; -use reth_primitives::{keccak256, Address, B256}; use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; /// A struct for generating merkle proofs. /// @@ -28,7 +28,7 @@ pub struct Proof<T, H> { /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, /// Proof targets. - targets: HashMap<B256, Vec<B256>>, + targets: HashMap<B256, HashSet<B256>>, } impl<T, H> Proof<T, H> { @@ -42,6 +42,16 @@ } } + /// Set the trie cursor factory. + pub fn with_trie_cursor_factory<TF>(self, trie_cursor_factory: TF) -> Proof<TF, H> { + Proof { + trie_cursor_factory, + hashed_cursor_factory: self.hashed_cursor_factory, + prefix_sets: self.prefix_sets, + targets: self.targets, + } + } + /// Set the hashed cursor factory. pub fn with_hashed_cursor_factory<HF>(self, hashed_cursor_factory: HF) -> Proof<T, HF> { Proof { @@ -58,8 +68,13 @@ self } + /// Set the target account and slots. + pub fn with_target(self, target: (B256, HashSet<B256>)) -> Self { + self.with_targets(HashMap::from([target])) + } + /// Set the target accounts and slots.
- pub fn with_targets(mut self, targets: HashMap<B256, Vec<B256>>) -> Self { + pub fn with_targets(mut self, targets: HashMap<B256, HashSet<B256>>) -> Self { self.targets = targets; self } @@ -77,10 +92,7 @@ where slots: &[B256], ) -> Result<AccountProof, StateProofError> { Ok(self - .with_targets(HashMap::from([( - keccak256(address), - slots.iter().map(keccak256).collect(), - )])) + .with_target((keccak256(address), slots.iter().map(keccak256).collect())) .multiproof()? .account_proof(address, slots)?) } diff --git a/crates/trie/trie/src/state.rs index a5e58341ff..f555d4962e 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -1,13 +1,13 @@ +use alloy_primitives::{keccak256, Address, B256, U256}; +use itertools::Itertools; +use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use reth_primitives::Account; +use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; use std::{ borrow::Cow, collections::{hash_map, HashMap, HashSet}, }; -use itertools::Itertools; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; -use reth_primitives::{keccak256, Account, Address, B256, U256}; -use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; - use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, Nibbles, diff --git a/crates/trie/trie/src/test_utils.rs index e2fc1f192c..0d0462be90 100644 --- a/crates/trie/trie/src/test_utils.rs +++ b/crates/trie/trie/src/test_utils.rs @@ -1,5 +1,6 @@ +use alloy_primitives::{Address, B256, U256}; use alloy_rlp::encode_fixed_size; -use reth_primitives::{Account, Address, B256, U256}; +use reth_primitives::Account; use reth_trie_common::{triehash::KeccakHasher, TrieAccount}; /// Re-export of [triehash]. diff --git a/crates/trie/trie/src/trie.rs index 50f363f678..914f6ce601 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -9,9 +9,10 @@ use crate::{ walker::TrieWalker, HashBuilder, Nibbles, TrieAccount, }; +use alloy_primitives::{keccak256, Address, B256}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; +use reth_primitives::constants::EMPTY_ROOT_HASH; use tracing::trace; #[cfg(feature = "metrics")] @@ -24,7 +25,7 @@ pub struct StateRoot<T, H> { pub trie_cursor_factory: T, /// The factory for hashed cursors. pub hashed_cursor_factory: H, - /// A set of prefix sets that have changes. + /// A set of prefix sets that have changed. pub prefix_sets: TriePrefixSets, /// Previous intermediate state.
previous_state: Option<IntermediateStateRootState>, diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs index 4159606c49..ccb797cde8 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -3,7 +3,7 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, }; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; use reth_trie_common::{BranchNodeCompact, Nibbles}; use std::collections::HashSet; diff --git a/crates/trie/trie/src/trie_cursor/mod.rs index 3e3b408ed7..c6d42642db 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -1,5 +1,5 @@ use crate::{BranchNodeCompact, Nibbles}; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; /// In-memory implementations of trie cursors. diff --git a/crates/trie/trie/src/trie_cursor/noop.rs index 94670cf7f6..f3239b5810 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -1,6 +1,6 @@ use super::{TrieCursor, TrieCursorFactory}; use crate::{BranchNodeCompact, Nibbles}; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; /// Noop trie cursor factory. diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs index 5385f6e312..c2ba839ebf 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -1,5 +1,5 @@ use crate::{BranchNodeCompact, Nibbles, StoredSubNode, CHILD_INDEX_RANGE}; -use reth_primitives::B256; +use alloy_primitives::B256; /// Cursor for iterating over a subtrie. #[derive(Clone)] diff --git a/crates/trie/trie/src/updates.rs index 10bf036a95..9cb42fd68c 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -1,5 +1,5 @@ use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; -use reth_primitives::B256; +use alloy_primitives::B256; use std::collections::{HashMap, HashSet}; /// The aggregation of trie updates.
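The ordering rules of `TrieInput` are the subtle part of the new API: `append*` layers state on top (incoming entries win on conflict), `prepend*` slides state underneath (existing entries win), and the `*_cached` variants skip prefix-set construction because the cached nodes remain valid for those keys. A short composition sketch using only the methods shown above; the scenario names are illustrative, not from the diff:

```rust
use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput};

/// `older`/`newer` stand for the hashed state of two in-memory blocks;
/// `cached_nodes`/`cached_state` for the output of a previous root computation.
fn build_overlay(
    older: HashedPostState,
    newer: HashedPostState,
    cached_nodes: TrieUpdates,
    cached_state: HashedPostState,
) -> TrieInput {
    // from_state constructs the prefix sets for `older` automatically.
    let mut input = TrieInput::from_state(older);
    // append: newer entries overwrite older ones, and their prefix sets are merged in.
    input.append(newer);
    // prepend_cached: cached data becomes the base layer; prefix sets for it are
    // intentionally ignored, since the cached nodes are still usable.
    input.prepend_cached(cached_nodes, cached_state);
    input
}
```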
diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 76dde9f0c9..e75a96d0f1 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -3,7 +3,7 @@ use crate::{ trie_cursor::{CursorSubNode, TrieCursor}, BranchNodeCompact, Nibbles, }; -use reth_primitives::B256; +use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; use std::collections::HashSet; diff --git a/crates/trie/trie/src/witness.rs index 1914ba8848..0d08396160 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -2,10 +2,11 @@ use crate::{ hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, proof::Proof, trie_cursor::TrieCursorFactory, HashedPostState, }; +use alloy_primitives::{keccak256, Bytes, B256}; use alloy_rlp::{BufMut, Decodable, Encodable}; use itertools::Either; use reth_execution_errors::{StateProofError, TrieWitnessError}; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Bytes, B256}; +use reth_primitives::constants::EMPTY_ROOT_HASH; use reth_trie_common::{ BranchNode, HashBuilder, Nibbles, TrieAccount, TrieNode, CHILD_INDEX_RANGE, }; @@ -35,6 +36,16 @@ impl<T, H> TrieWitness<T, H> { } } + /// Set the trie cursor factory. + pub fn with_trie_cursor_factory<TF>(self, trie_cursor_factory: TF) -> TrieWitness<TF, H> { + TrieWitness { + trie_cursor_factory, + hashed_cursor_factory: self.hashed_cursor_factory, + prefix_sets: self.prefix_sets, + witness: self.witness, + } + } + /// Set the hashed cursor factory. pub fn with_hashed_cursor_factory<HF>(self, hashed_cursor_factory: HF) -> TrieWitness<T, HF> { TrieWitness { @@ -68,13 +79,15 @@ where state: HashedPostState, ) -> Result<HashMap<B256, Bytes>, TrieWitnessError> { let proof_targets = HashMap::from_iter( - state.accounts.keys().map(|hashed_address| (*hashed_address, Vec::new())).chain( - state.storages.iter().map(|(hashed_address, storage)| { + state + .accounts + .keys() + .map(|hashed_address| (*hashed_address, HashSet::default())) + .chain(state.storages.iter().map(|(hashed_address, storage)| { (*hashed_address, storage.storage.keys().copied().collect()) - }), - ), + })), ); - let account_multiproof = + let mut account_multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) .with_targets(proof_targets.clone()) @@ -85,11 +98,8 @@ where let mut account_rlp = Vec::with_capacity(128); let mut account_trie_nodes = BTreeMap::default(); for (hashed_address, hashed_slots) in proof_targets { - let key = Nibbles::unpack(hashed_address); - let storage_multiproof = account_multiproof - .storages - .get(&hashed_address) - .ok_or(TrieWitnessError::MissingStorageMultiProof(hashed_address))?; + let storage_multiproof = + account_multiproof.storages.remove(&hashed_address).unwrap_or_default(); // Gather and record account trie nodes. let account = state @@ -104,6 +114,7 @@ where } else { None }; + let key = Nibbles::unpack(hashed_address); let proof = account_multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0)); account_trie_nodes.extend(self.target_nodes(key.clone(), value, proof)?); @@ -124,16 +135,17 @@ where )?); } - let root = Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { + Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { // Right pad the target with 0s.
let mut padded_key = key.pack(); padded_key.resize(32, 0); + let target_key = B256::from_slice(&padded_key); let mut proof = Proof::new( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), ) .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_targets(HashMap::from([(B256::from_slice(&padded_key), Vec::new())])) + .with_target((hashed_address, HashSet::from([target_key]))) .storage_multiproof(hashed_address)?; // The subtree only contains the proof for a single target. @@ -142,7 +154,6 @@ where self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness Ok(node) })?; - debug_assert_eq!(storage_multiproof.root, root); } Self::next_root_from_proofs(account_trie_nodes, |key: Nibbles| { @@ -152,7 +163,7 @@ where let mut proof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_targets(HashMap::from([(B256::from_slice(&padded_key), Vec::new())])) + .with_target((B256::from_slice(&padded_key), HashSet::default())) .multiproof()?; // The subtree only contains the proof for a single target. diff --git a/deny.toml b/deny.toml index 4316984959..e582346025 100644 --- a/deny.toml +++ b/deny.toml @@ -2,8 +2,11 @@ # More documentation for the advisories section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] -version = 2 yanked = "warn" +ignore = [ + # proc-macro-error 1.0.4 unmaintained https://rustsec.org/advisories/RUSTSEC-2024-0370 + "RUSTSEC-2024-0370" +] # This section is considered when running `cargo deny check bans`. # More documentation about the 'bans' section can be found here: @@ -39,6 +42,7 @@ allow = [ "BSD-2-Clause", "BSD-3-Clause", "0BSD", + "CC0-1.0", "ISC", "Unicode-DFS-2016", "Unlicense", @@ -53,15 +57,6 @@ allow = [ # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ - # CC0 is a permissive license but somewhat unclear status for source code - # so we prefer to not have dependencies using it - # https://tldrlegal.com/license/creative-commons-cc0-1.0-universal - { allow = ["CC0-1.0"], name = "secp256k1" }, - { allow = ["CC0-1.0"], name = "secp256k1-sys" }, - { allow = ["CC0-1.0"], name = "tiny-keccak" }, - { allow = ["CC0-1.0"], name = "more-asserts" }, - { allow = ["CC0-1.0"], name = "to_method" }, - { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 { allow = ["MPL-2.0"], name = "option-ext" }, diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 28ea9f473b..ddc1af1325 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -19,8 +19,10 @@ Generally reth is composed of a few components, with supporting crates. The main - [RPC](#rpc) - [Transports](#transports) - [Common](#common-1) + - [Utilities Crates](#utilities-crates) - [Payloads](#payloads) - [Primitives](#primitives) + - [Optimism](#optimism) - [Misc](#misc) The supporting crates are split into two categories: [primitives](#primitives) and [miscellaneous](#misc). @@ -87,7 +89,10 @@ Different consensus mechanisms. Crates related to transaction execution. -- [`revm`](../../crates/revm): An implementation of an executor using `revm` +- [`revm`](../../crates/revm): Revm utils and implementations specific to reth. +- [`evm`](../../crates/evm): Traits for configuring EVM specifics.
+- [`execution-types`](../../crates/evm/execution-types): Commonly used types for (EVM) block execution. +- [`execution-errors`](../../crates/evm/execution-errors): Commonly used error types for block execution. ### Sync @@ -110,13 +115,15 @@ The RPC component mainly lives in [`rpc/rpc`](../../crates/rpc/rpc), which imple - `txpool_` - `web3_` +These RPC interfaces are defined in [`rpc/rpc-api`](../../crates/rpc/rpc-api). + The engine API ([`engine_`][engine-spec]) lives in [`rpc/rpc-engine-api`](../../crates/rpc/rpc-engine-api) (this is *not* an interface crate despite the confusing name). There is also a crate to easily configure an RPC server: [`rpc/rpc-builder`](../../crates/rpc/rpc-builder). #### Transports -The RPC component is based on the `jsonrpsee` crate which provides JSONRPC over WebSockets and HTTP. +The RPC component is based on the [`jsonrpsee`][jsonrpsee] crate which provides JSONRPC over WebSockets and HTTP. The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). @@ -126,6 +133,15 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). - Supported transports: HTTP, WS, IPC - Supported namespaces: `eth_`, `engine_`, `debug_` - [`rpc/rpc-types`](../../crates/rpc/rpc-types): Types relevant for the RPC endpoints above, grouped by namespace +- [`rpc/rpc-eth-api`](../../crates/rpc/rpc-eth-api/): Reth RPC 'eth' namespace API (including interface and implementation); this crate is re-exported by `rpc/rpc-api` +- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types supporting the implementation of the 'eth' namespace RPC server API +- [`rpc/rpc-server-types`](../../crates/rpc/rpc-server-types/): RPC server types and constants + +#### Utilities Crates + +- [`rpc/rpc-types-compat`](../../crates/rpc-types-compat): This crate contains various helper functions for converting between reth primitive types and RPC types. +- [`rpc/layer`](../../crates/rpc/rpc-layer/): Some RPC middleware layers (e.g. `AuthValidator`, `JwtAuthValidator`) +- [`rpc/rpc-testing-util`](../../crates/rpc/rpc-testing-util/): Reth RPC testing helpers ### Payloads @@ -140,8 +156,13 @@ Crates related to building and validating payloads (blocks). These crates define primitive types or algorithms. - [`primitives`](../../crates/primitives): Commonly used types in Reth. +- [`primitives-traits`](../../crates/primitives-traits/): Common abstracted types in reth. - [`trie`](../../crates/trie): An implementation of a Merkle Patricia Trie used for various roots (e.g. the state root) in Ethereum. +### Optimism + +Crates related to the Optimism rollup live in [`optimism`](../../crates/optimism/). + ### Misc Small utility crates.
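Looking back at the `Proof` changes a few files up: with targets now stored as `HashMap<B256, HashSet<B256>>`, a single-account multiproof composes naturally from `with_target`. A sketch under assumed (but conventional for this crate) `Clone` bounds on the cursor factories; only methods shown in the diff are used:

```rust
use alloy_primitives::{keccak256, Address, B256};
use reth_execution_errors::trie::StateProofError;
use reth_trie::{hashed_cursor::HashedCursorFactory, proof::Proof, trie_cursor::TrieCursorFactory};
use reth_trie_common::MultiProof;
use std::collections::HashSet;

/// Collect a multiproof for one account and a set of its storage slots.
/// `t`/`h` are whatever cursor factories the caller already holds
/// (database-backed, in-memory, or the layered pair used earlier).
fn account_multiproof<T, H>(
    t: T,
    h: H,
    address: Address,
    slots: &[B256],
) -> Result<MultiProof, StateProofError>
where
    T: TrieCursorFactory + Clone,
    H: HashedCursorFactory + Clone,
{
    // Targets are keyed by hashed address; slot keys are hashed the same way.
    let hashed_slots: HashSet<B256> = slots.iter().map(keccak256).collect();
    Proof::new(t, h).with_target((keccak256(address), hashed_slots)).multiproof()
}
```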
diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index b32ac3553c..addd179303 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -129,7 +129,8 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -161,6 +162,77 @@ "wideLayout": true }, "pluginVersion": "11.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_chain_spec{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{name}}", + "range": false, + "refId": "A" + } + ], + "title": "Chain", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 3, + "y": 1 + }, + "id": 240, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -199,14 +271,15 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, "gridPos": { "h": 3, - "w": 6, - "x": 3, + "w": 5, + "x": 5, "y": 1 }, "id": 192, @@ -269,14 +342,15 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, "gridPos": { "h": 3, - "w": 3, - "x": 9, + "w": 2, + "x": 10, "y": 1 }, "id": 193, @@ -339,7 +413,8 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -409,7 +484,8 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -479,7 +555,8 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -521,7 +598,7 @@ "exemplar": false, "expr": "reth_info{instance=~\"$instance\"}", "instant": true, - "legendFormat": "{{cargo_features}}", + "legendFormat": "{{cargo_features}} ", "range": false, "refId": "A" } @@ -563,7 +640,8 @@ "value": 30 } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -630,7 +708,8 @@ "value": null } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -733,7 +812,8 @@ } ] }, - "unit": "bytes" + "unit": "bytes", + "unitScale": true }, "overrides": [] }, @@ -872,7 +952,8 @@ } ] }, - "unit": "percentunit" + "unit": "percentunit", + "unitScale": true }, "overrides": [] }, @@ -966,7 +1047,8 @@ "value": 80 } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -1074,7 +1156,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -1134,7 +1217,8 @@ "scaleDistribution": { "type": "linear" } - } + }, + "unitScale": true }, "overrides": [] }, @@ -1261,7 +1345,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -1357,7 +1442,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -1448,7 +1534,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1548,7 +1635,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1722,7 +1810,8 @@ 
"mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1861,7 +1950,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1981,7 +2071,7 @@ }, "showHeader": true }, - "pluginVersion": "10.3.3", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -2049,7 +2139,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2206,7 +2297,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2314,7 +2406,7 @@ }, "showHeader": true }, - "pluginVersion": "10.3.3", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -2356,7 +2448,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2464,7 +2557,7 @@ }, "showHeader": true }, - "pluginVersion": "10.3.3", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -2532,7 +2625,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2628,7 +2722,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2688,7 +2783,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Stage: Execution", + "title": "Execution", "type": "row" }, { @@ -2696,7 +2791,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "The amount of gas processed by the execution stage in millions per second.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", + "description": "The throughput of the node's executor. 
The metric is the amount of gas processed in a block, divided by the time it took to process the block.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", "fieldConfig": { "defaults": { "color": { @@ -2739,11 +2834,12 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, - "unit": "Mgas/s", + "unit": "si: gas/s", "unitScale": true }, "overrides": [] @@ -2774,49 +2870,132 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(reth_sync_execution_mgas_processed_total{instance=~\"$instance\"}[30s])", - "legendFormat": "Gas/s (30s)", + "expr": "reth_sync_execution_gas_per_second{instance=~\"$instance\"}", + "legendFormat": "Gas/s", "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + } + ], + "title": "Execution throughput", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "editorMode": "builder", - "expr": "rate(reth_sync_execution_mgas_processed_total{instance=~\"$instance\"}[1m])", - "hide": false, - "legendFormat": "Gas/s (1m)", - "range": true, - "refId": "B" + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 25, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s", + "unitScale": true }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 95 + }, + "id": 242, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_sync_execution_mgas_processed_total{instance=~\"$instance\"}[5m])", - "hide": false, - "legendFormat": "Gas/s (5m)", + "expr": "reth_sync_block_validation_state_root_duration{instance=\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration", "range": true, - "refId": "C" + "refId": "A", + "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_sync_execution_mgas_processed_total{instance=~\"$instance\"}[10m])", + "expr": "reth_sync_execution_execution_duration{instance=\"$instance\"}", + "fullMetaSearch": false, "hide": false, - "legendFormat": "Gas/s (10m)", + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution Duration", "range": true, - "refId": "D" + "refId": "B", + "useBackend": false } ], - "title": "Gas processed", + "title": "Block Processing Latency", "type": "timeseries" }, 
{ @@ -2825,7 +3004,7 @@ "h": 1, "w": 24, "x": 0, - "y": 95 + "y": 106 }, "id": 24, "panels": [], @@ -2881,7 +3060,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2922,7 +3102,7 @@ "h": 8, "w": 12, "x": 0, - "y": 96 + "y": 107 }, "id": 26, "options": { @@ -3038,7 +3218,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3055,7 +3236,7 @@ "h": 8, "w": 12, "x": 12, - "y": 96 + "y": 107 }, "id": 33, "options": { @@ -3158,7 +3339,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3174,7 +3356,7 @@ "h": 8, "w": 12, "x": 0, - "y": 104 + "y": 115 }, "id": 36, "options": { @@ -3223,7 +3405,7 @@ "h": 1, "w": 24, "x": 0, - "y": 112 + "y": 123 }, "id": 32, "panels": [], @@ -3280,7 +3462,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3330,7 +3513,7 @@ "h": 8, "w": 12, "x": 0, - "y": 113 + "y": 124 }, "id": 30, "options": { @@ -3482,7 +3665,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -3495,7 +3679,7 @@ "h": 8, "w": 12, "x": 12, - "y": 113 + "y": 124 }, "id": 28, "options": { @@ -3598,7 +3782,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3614,7 +3799,7 @@ "h": 8, "w": 12, "x": 0, - "y": 121 + "y": 132 }, "id": 35, "options": { @@ -3705,7 +3890,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3739,7 +3925,7 @@ "h": 8, "w": 12, "x": 12, - "y": 121 + "y": 132 }, "id": 73, "options": { @@ -3831,7 +4017,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3865,7 +4052,7 @@ "h": 8, "w": 12, "x": 0, - "y": 129 + "y": 140 }, "id": 102, "options": { @@ -3928,7 +4115,7 @@ "h": 1, "w": 24, "x": 0, - "y": 137 + "y": 148 }, "id": 79, "panels": [], @@ -3985,7 +4172,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4001,7 +4189,7 @@ "h": 8, "w": 12, "x": 0, - "y": 138 + "y": 149 }, "id": 74, "options": { @@ -4081,7 +4269,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4097,7 +4286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 138 + "y": 149 }, "id": 80, "options": { @@ -4177,7 +4366,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4193,7 +4383,7 @@ "h": 8, "w": 12, "x": 0, - "y": 146 + "y": 157 }, "id": 81, "options": { @@ -4272,7 +4462,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4289,7 +4480,7 @@ "h": 8, "w": 12, "x": 12, - "y": 146 + "y": 157 }, "id": 114, "options": { @@ -4368,7 +4559,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4385,7 +4577,7 @@ "h": 8, "w": 12, "x": 12, - "y": 154 + "y": 165 }, "id": 190, "options": { @@ -4423,7 +4615,7 @@ "h": 1, "w": 24, "x": 0, - "y": 162 + "y": 173 }, "id": 87, "panels": [], @@ -4480,7 +4672,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4496,7 +4689,7 @@ "h": 8, "w": 12, "x": 0, - "y": 163 + "y": 174 }, "id": 83, "options": { @@ -4575,7 
+4768,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4591,7 +4785,7 @@ "h": 8, "w": 12, "x": 12, - "y": 163 + "y": 174 }, "id": 84, "options": { @@ -4682,7 +4876,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4698,7 +4893,7 @@ "h": 8, "w": 12, "x": 0, - "y": 171 + "y": 182 }, "id": 85, "options": { @@ -4777,7 +4972,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4794,7 +4990,7 @@ "h": 8, "w": 12, "x": 12, - "y": 171 + "y": 182 }, "id": 210, "options": { @@ -5101,7 +5297,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5118,7 +5315,7 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 190 }, "id": 211, "options": { @@ -5425,7 +5622,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5442,7 +5640,7 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 190 }, "id": 212, "options": { @@ -5628,7 +5826,7 @@ "h": 1, "w": 24, "x": 0, - "y": 187 + "y": 198 }, "id": 214, "panels": [], @@ -5682,7 +5880,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5699,7 +5898,7 @@ "h": 8, "w": 12, "x": 0, - "y": 188 + "y": 199 }, "id": 215, "options": { @@ -5778,7 +5977,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5794,7 +5994,7 @@ "h": 8, "w": 12, "x": 12, - "y": 188 + "y": 199 }, "id": 216, "options": { @@ -5845,7 +6045,7 @@ "h": 1, "w": 24, "x": 0, - "y": 196 + "y": 207 }, "id": 68, "panels": [], @@ -5902,7 +6102,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5918,7 +6119,7 @@ "h": 8, "w": 12, "x": 0, - "y": 197 + "y": 208 }, "id": 60, "options": { @@ -5997,7 +6198,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6013,7 +6215,7 @@ "h": 8, "w": 12, "x": 12, - "y": 197 + "y": 208 }, "id": 62, "options": { @@ -6092,7 +6294,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6108,7 +6311,7 @@ "h": 8, "w": 12, "x": 0, - "y": 205 + "y": 216 }, "id": 64, "options": { @@ -6145,7 +6348,7 @@ "h": 1, "w": 24, "x": 0, - "y": 213 + "y": 224 }, "id": 97, "panels": [], @@ -6199,7 +6402,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6229,7 +6433,7 @@ "h": 8, "w": 12, "x": 0, - "y": 214 + "y": 225 }, "id": 98, "options": { @@ -6374,7 +6578,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6391,7 +6596,7 @@ "h": 8, "w": 12, "x": 12, - "y": 214 + "y": 225 }, "id": 101, "options": { @@ -6471,7 +6676,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6488,7 +6694,7 @@ "h": 8, "w": 12, "x": 0, - "y": 222 + "y": 233 }, "id": 99, "options": { @@ -6568,7 +6774,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6585,7 +6792,7 @@ "h": 8, "w": 12, "x": 12, - "y": 222 + "y": 233 }, "id": 100, "options": { @@ -6623,7 +6830,7 @@ "h": 1, "w": 24, "x": 0, - "y": 230 + "y": 241 }, "id": 105, "panels": [], 
@@ -6678,7 +6885,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6695,7 +6903,7 @@ "h": 8, "w": 12, "x": 0, - "y": 231 + "y": 242 }, "id": 106, "options": { @@ -6775,7 +6983,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6792,7 +7001,7 @@ "h": 8, "w": 12, "x": 12, - "y": 231 + "y": 242 }, "id": 107, "options": { @@ -6871,7 +7080,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6888,7 +7098,7 @@ "h": 8, "w": 12, "x": 0, - "y": 239 + "y": 250 }, "id": 217, "options": { @@ -6926,7 +7136,7 @@ "h": 1, "w": 24, "x": 0, - "y": 247 + "y": 258 }, "id": 108, "panels": [], @@ -6981,7 +7191,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7023,7 +7234,7 @@ "h": 8, "w": 12, "x": 0, - "y": 248 + "y": 259 }, "id": 109, "options": { @@ -7086,7 +7297,7 @@ "h": 8, "w": 12, "x": 12, - "y": 248 + "y": 259 }, "id": 111, "maxDataPoints": 25, @@ -7131,7 +7342,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.3.3", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -7198,7 +7409,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7215,7 +7427,7 @@ "h": 8, "w": 12, "x": 0, - "y": 256 + "y": 267 }, "id": 120, "options": { @@ -7274,7 +7486,7 @@ "h": 8, "w": 12, "x": 12, - "y": 256 + "y": 267 }, "id": 112, "maxDataPoints": 25, @@ -7319,7 +7531,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.3.3", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -7386,7 +7598,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7427,7 +7640,7 @@ "h": 8, "w": 12, "x": 0, - "y": 264 + "y": 275 }, "id": 198, "options": { @@ -7595,7 +7808,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7612,7 +7826,7 @@ "h": 8, "w": 12, "x": 12, - "y": 264 + "y": 275 }, "id": 213, "options": { @@ -7651,7 +7865,7 @@ "h": 1, "w": 24, "x": 0, - "y": 272 + "y": 283 }, "id": 236, "panels": [], @@ -7706,7 +7920,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7722,7 +7937,7 @@ "h": 8, "w": 12, "x": 0, - "y": 273 + "y": 284 }, "id": 237, "options": { @@ -7802,7 +8017,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7818,7 +8034,7 @@ "h": 8, "w": 12, "x": 12, - "y": 273 + "y": 284 }, "id": 238, "options": { @@ -7898,7 +8114,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7914,7 +8131,7 @@ "h": 8, "w": 12, "x": 0, - "y": 281 + "y": 292 }, "id": 239, "options": { @@ -8006,7 +8223,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8022,7 +8240,7 @@ "h": 8, "w": 12, "x": 12, - "y": 281 + "y": 292 }, "id": 219, "options": { @@ -8065,16 +8283,13 @@ "color": { "mode": "palette-classic" }, - "custom": { - "align": "auto", - "filterable": false - }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8082,7 +8297,8 @@ } ] }, - "unit": "none" + "unit": "none", + "unitScale": true }, "overrides": [] }, @@ 
-8090,7 +8306,7 @@ "h": 8, "w": 12, "x": 0, - "y": 289 + "y": 300 }, "id": 220, "options": { @@ -8105,9 +8321,11 @@ "fields": "", "values": false }, - "textMode": "auto" + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true }, - "pluginVersion": "8.0.6", + "pluginVersion": "11.1.0", "targets": [ { "datasource": { @@ -8131,7 +8349,7 @@ "h": 1, "w": 24, "x": 0, - "y": 297 + "y": 308 }, "id": 226, "panels": [], @@ -8186,7 +8404,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8194,7 +8413,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [ { @@ -8227,7 +8447,7 @@ "h": 8, "w": 12, "x": 0, - "y": 298 + "y": 309 }, "id": 225, "options": { @@ -8313,7 +8533,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8321,7 +8542,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [ { @@ -8354,7 +8576,7 @@ "h": 8, "w": 12, "x": 12, - "y": 298 + "y": 309 }, "id": 227, "options": { @@ -8440,7 +8662,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8448,7 +8671,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [ { @@ -8481,7 +8705,7 @@ "h": 8, "w": 12, "x": 0, - "y": 306 + "y": 317 }, "id": 235, "options": { @@ -8567,7 +8791,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8575,7 +8800,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [ { @@ -8608,7 +8834,7 @@ "h": 8, "w": 12, "x": 12, - "y": 306 + "y": 317 }, "id": 234, "options": { @@ -8685,6 +8911,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 1, + "version": 3, "weekStart": "" } diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index c31153be13..ff66596427 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -21,13 +21,16 @@ use std::{ use clap::Parser; use futures_util::{stream::FuturesUnordered, StreamExt}; use mined_sidecar::MinedSidecarStream; -use reth::{builder::NodeHandle, cli::Cli, primitives::B256, providers::CanonStateSubscriptions}; +use reth::{ + args::utils::DefaultChainSpecParser, builder::NodeHandle, cli::Cli, primitives::B256, + providers::CanonStateSubscriptions, +}; use reth_node_ethereum::EthereumNode; pub mod mined_sidecar; fn main() { - Cli::::parse() + Cli::::parse() .run(|builder, beacon_config| async move { // launch the node let NodeHandle { node, node_exit_future } = diff --git a/examples/beacon-api-sse/Cargo.toml b/examples/beacon-api-sse/Cargo.toml index 8667ae7ab1..ca49897c9e 100644 --- a/examples/beacon-api-sse/Cargo.toml +++ b/examples/beacon-api-sse/Cargo.toml @@ -13,6 +13,6 @@ alloy-rpc-types-beacon.workspace = true clap.workspace = true futures-util.workspace = true -mev-share-sse = { version = "0.3.0", default-features = false } +mev-share-sse = { version = "0.4.0", default-features = false } tokio = { workspace = true, features = ["time"] } tracing.workspace = true diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index a2d74a77c2..53078e5bc8 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -21,13 +21,13 @@ use alloy_rpc_types_beacon::events::PayloadAttributesEvent; use 
clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::cli::Cli; +use reth::{args::utils::DefaultChainSpecParser, cli::Cli}; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; fn main() { - Cli::::parse() + Cli::::parse() .run(|builder, args| async move { let handle = builder.node(EthereumNode::default()).launch().await?; diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs index 92bf2f10a3..11c5702332 100644 --- a/examples/bsc-p2p/src/chainspec.rs +++ b/examples/bsc-p2p/src/chainspec.rs @@ -1,5 +1,5 @@ use reth_chainspec::{ - BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, + once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, }; use reth_network_peers::NodeRecord; use reth_primitives::{b256, B256}; @@ -14,7 +14,8 @@ pub(crate) fn bsc_chain_spec() -> Arc<ChainSpec> { ChainSpec { chain: Chain::from_id(56), genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), - genesis_hash: Some(GENESIS), + genesis_hash: once_cell_set(GENESIS), + genesis_header: Default::default(), paris_block_and_final_difficulty: None, hardforks: ChainHardforks::new(vec![( EthereumHardfork::Shanghai.boxed(), diff --git a/examples/custom-engine-types/src/main.rs index 9980be374f..b6bccd2beb 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -27,10 +27,9 @@ use reth::{ api::PayloadTypes, builder::{ components::{ComponentsBuilder, PayloadServiceBuilder}, - node::NodeTypes, + node::{NodeTypes, NodeTypesWithEngine}, BuilderContext, FullNodeTypes, Node, NodeBuilder, PayloadBuilderConfig, }, - primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, providers::{CanonStateSubscriptions, StateProviderFactory}, tasks::TaskManager, transaction_pool::TransactionPool, @@ -45,15 +44,18 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::node::{ - EthereumAddOns, EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, - EthereumPoolBuilder, +use reth_node_ethereum::{ + node::{ + EthereumAddOns, EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, + EthereumPoolBuilder, + }, + EthEvmConfig, }; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle, PayloadBuilderService, }; -use reth_primitives::{Address, Header, Withdrawals, B256}; +use reth_primitives::{Address, Withdrawals, B256}; use reth_rpc_types::{ engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -150,14 +152,6 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { fn withdrawals(&self) -> &Withdrawals { &self.0.withdrawals } - - fn cfg_and_block_env( - &self, - chain_spec: &ChainSpec, - parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { - self.0.cfg_and_block_env(chain_spec, parent) - } } /// Custom engine types - uses a custom payload attributes RPC type, but uses the default @@ -194,17 +188,20 @@ struct MyCustomNode; /// Configure the node types impl NodeTypes for MyCustomNode { type Primitives = (); - // use the custom engine types - type Engine = CustomEngineTypes; type ChainSpec = ChainSpec; } +///
Configure the node types with the custom engine types +impl NodeTypesWithEngine for MyCustomNode { + type Engine = CustomEngineTypes; +} + /// Implement the Node trait for the custom node /// /// This provides a preset configuration for the node impl<N> Node<N> for MyCustomNode where - N: FullNodeTypes<Engine = CustomEngineTypes>, + N: FullNodeTypes<Types: NodeTypesWithEngine<Engine = CustomEngineTypes, ChainSpec = ChainSpec>>, { type ComponentsBuilder = ComponentsBuilder< N, @@ -234,14 +231,16 @@ pub struct CustomPayloadServiceBuilder; impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for CustomPayloadServiceBuilder where - Node: FullNodeTypes<Engine = CustomEngineTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine<Engine = CustomEngineTypes, ChainSpec = ChainSpec>, + >, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( self, ctx: &BuilderContext<Node>, pool: Pool, - ) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> { + ) -> eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypesWithEngine>::Engine>> { let payload_builder = CustomPayloadBuilder::default(); let conf = ctx.payload_builder_config(); @@ -286,24 +285,18 @@ where args: BuildArguments<Pool, Client, Self::Attributes, Self::BuiltPayload>, ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> { let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args; - let PayloadConfig { - initialized_block_env, - initialized_cfg, - parent_block, - extra_data, - attributes, - chain_spec, - } = config; + let PayloadConfig { parent_block, extra_data, attributes, chain_spec } = config; // This reuses the default EthereumPayloadBuilder to build the payload // but any custom logic can be implemented here - reth_ethereum_payload_builder::EthereumPayloadBuilder::default().try_build(BuildArguments { + reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new( + chain_spec.clone(), + )) + .try_build(BuildArguments { client, pool, cached_reads, config: PayloadConfig { - initialized_block_env, - initialized_cfg, parent_block, extra_data, attributes: attributes.0, @@ -319,16 +312,9 @@ where client: &Client, config: PayloadConfig<Self::Attributes>, ) -> Result<Self::BuiltPayload, PayloadBuilderError> { - let PayloadConfig { - initialized_block_env, - initialized_cfg, - parent_block, - extra_data, - attributes, - chain_spec, - } = config; - <reth_ethereum_payload_builder::EthereumPayloadBuilder as PayloadBuilder<Pool, Client>>::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::default(),client, - PayloadConfig { initialized_block_env, initialized_cfg, parent_block, extra_data, attributes: attributes.0, chain_spec }) + let PayloadConfig { parent_block, extra_data, attributes, chain_spec } = config; + <reth_ethereum_payload_builder::EthereumPayloadBuilder as PayloadBuilder<Pool, Client>>::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(chain_spec.clone())),client, + PayloadConfig { parent_block, extra_data, attributes: attributes.0, chain_spec }) } } diff --git a/examples/custom-evm/src/main.rs index c8b8a20f7a..3a93d85ad6 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -4,7 +4,11 @@ use alloy_genesis::Genesis; use reth::{ - builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, + builder::{ + components::{ExecutorBuilder, PayloadServiceBuilder}, + BuilderContext, NodeBuilder, + }, + payload::{EthBuiltPayload, EthPayloadBuilderAttributes}, primitives::{ address, revm_primitives::{Env, PrecompileResult}, @@ -14,29 +18,44 @@ use reth::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileOutput, PrecompileSpecId}, + primitives::BlockEnv, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, + rpc::types::engine::PayloadAttributes, tasks::TaskManager, + transaction_pool::TransactionPool, } -use reth_chainspec::{Chain, ChainSpec, Head}; +use reth_chainspec::{Chain, ChainSpec}; use reth_evm_ethereum::EthEvmConfig; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv,
FullNodeTypes}; +use reth_node_api::{ + ConfigureEvm, ConfigureEvmEnv, FullNodeTypes, NextBlockEnvAttributes, NodeTypes, + NodeTypesWithEngine, PayloadTypes, +}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, EthExecutorProvider, EthereumNode, }; use reth_primitives::{ - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, Address, Header, TransactionSigned, U256, }; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; /// Custom EVM configuration -#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone)] #[non_exhaustive] -pub struct MyEvmConfig; +pub struct MyEvmConfig { + /// Wrapper around mainnet configuration + inner: EthEvmConfig, +} + +impl MyEvmConfig { + pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { + Self { inner: EthEvmConfig::new(chain_spec) } + } +} impl MyEvmConfig { /// Sets the precompiles to the EVM handler @@ -70,32 +89,10 @@ impl MyEvmConfig { } impl ConfigureEvmEnv for MyEvmConfig { - fn fill_cfg_env( - &self, - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = reth_evm_ethereum::revm_spec( - chain_spec, - &Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - } + type Header = Header; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender) + self.inner.fill_tx_env(tx_env, transaction, sender); } fn fill_tx_env_system_contract_call( @@ -105,7 +102,24 @@ impl ConfigureEvmEnv for MyEvmConfig { contract: Address, data: Bytes, ) { - EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data) + self.inner.fill_tx_env_system_contract_call(env, caller, contract, data); + } + + fn fill_cfg_env( + &self, + cfg_env: &mut CfgEnvWithHandlerCfg, + header: &Self::Header, + total_difficulty: U256, + ) { + self.inner.fill_cfg_env(cfg_env, header, total_difficulty); + } + + fn next_cfg_and_block_env( + &self, + parent: &Self::Header, + attributes: NextBlockEnvAttributes, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + self.inner.next_cfg_and_block_env(parent, attributes) } } @@ -144,7 +158,7 @@ pub struct MyExecutorBuilder; impl<Node> ExecutorBuilder<Node> for MyExecutorBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, { type EVM = MyEvmConfig; type Executor = EthExecutorProvider<Self::EVM>; @@ -154,12 +168,38 @@ where ctx: &BuilderContext<Node>, ) -> eyre::Result<(Self::EVM, Self::Executor)> { Ok(( - MyEvmConfig::default(), - EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::default()), + MyEvmConfig::new(ctx.chain_spec()), + EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::new(ctx.chain_spec())), )) } } +/// Builds a regular ethereum payload builder that uses the custom EVM.
+#[derive(Debug, Default, Clone)]
+#[non_exhaustive]
+pub struct MyPayloadBuilder {
+    inner: EthereumPayloadBuilder,
+}
+
+impl<Types, Node, Pool> PayloadServiceBuilder<Node, Pool> for MyPayloadBuilder
+where
+    Types: NodeTypesWithEngine<ChainSpec = ChainSpec>,
+    Node: FullNodeTypes<Types = Types>,
+    Pool: TransactionPool + Unpin + 'static,
+    Types::Engine: PayloadTypes<
+        BuiltPayload = EthBuiltPayload,
+        PayloadAttributes = PayloadAttributes,
+        PayloadBuilderAttributes = EthPayloadBuilderAttributes,
+    >,
+{
+    async fn spawn_payload_service(
+        self,
+        ctx: &BuilderContext<Node>,
+        pool: Pool,
+    ) -> eyre::Result<PayloadBuilderHandle<Types::Engine>> {
+        self.inner.spawn(MyEvmConfig::new(ctx.chain_spec()), ctx, pool)
+    }
+}
+
 #[tokio::main]
 async fn main() -> eyre::Result<()> {
     let _guard = RethTracer::new().init()?;
@@ -187,7 +227,7 @@ async fn main() -> eyre::Result<()> {
         .with_components(
             EthereumNode::components()
                 .executor(MyExecutorBuilder::default())
-                .payload(EthereumPayloadBuilder::new(MyEvmConfig::default())),
+                .payload(MyPayloadBuilder::default()),
         )
         .with_add_ons::<EthereumAddOns>()
         .launch()
diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs
index b6721eded6..9d6a95b59e 100644
--- a/examples/custom-inspector/src/main.rs
+++ b/examples/custom-inspector/src/main.rs
@@ -13,6 +13,7 @@
 use clap::Parser;
 use futures_util::StreamExt;
 use reth::{
+    args::utils::DefaultChainSpecParser,
     builder::NodeHandle,
     cli::Cli,
     primitives::{Address, BlockNumberOrTag, IntoRecoveredTransaction},
@@ -28,7 +29,7 @@ use reth_node_ethereum::node::EthereumNode;
 use reth_rpc_types::state::EvmOverrides;

 fn main() {
-    Cli::<RethCliTxpoolExt>::parse()
+    Cli::<DefaultChainSpecParser, RethCliTxpoolExt>::parse()
         .run(|builder, args| async move {
             // launch the node
             let NodeHandle { node, node_exit_future } =
diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs
index 2c44e18d85..1faca73d25 100644
--- a/examples/custom-node-components/src/main.rs
+++ b/examples/custom-node-components/src/main.rs
@@ -3,7 +3,9 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]

 use reth::{
+    api::NodeTypes,
     builder::{components::PoolBuilder, BuilderContext, FullNodeTypes},
+    chainspec::ChainSpec,
     cli::Cli,
     providers::CanonStateSubscriptions,
     transaction_pool::{
@@ -45,7 +47,7 @@ pub struct CustomPoolBuilder {
 /// This will be used to build the transaction pool and its maintenance tasks during launch.
 impl<Node> PoolBuilder<Node> for CustomPoolBuilder
 where
-    Node: FullNodeTypes,
+    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>,
 {
     type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>;
diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs
index e0bf4e57a0..4861f8284b 100644
--- a/examples/custom-payload-builder/src/main.rs
+++ b/examples/custom-payload-builder/src/main.rs
@@ -21,7 +21,8 @@ use reth::{
 };
 use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig;
 use reth_chainspec::ChainSpec;
-use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthereumNode};
+use reth_node_api::NodeTypesWithEngine;
+use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode};
 use reth_payload_builder::PayloadBuilderService;

 pub mod generator;
@@ -33,14 +34,14 @@ pub struct CustomPayloadBuilder;
 impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for CustomPayloadBuilder
 where
-    Node: FullNodeTypes<Engine = EthEngineTypes>,
+    Node: FullNodeTypes<Types: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = ChainSpec>>,
     Pool: TransactionPool + Unpin + 'static,
 {
     async fn spawn_payload_service(
         self,
         ctx: &BuilderContext<Node>,
         pool: Pool,
-    ) -> eyre::Result<PayloadBuilderHandle<EthEngineTypes>> {
+    ) -> eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypesWithEngine>::Engine>> {
         tracing::info!("Spawning a custom payload builder");

         let conf = ctx.payload_builder_config();
@@ -56,7 +57,9 @@ where
             ctx.task_executor().clone(),
             payload_job_config,
             ctx.chain_spec().clone(),
-            reth_ethereum_payload_builder::EthereumPayloadBuilder::default(),
+            reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(
+                ctx.chain_spec(),
+            )),
         );

         let (payload_service, payload_builder) =
diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml
index 692a1175de..6ddfeb5e5b 100644
--- a/examples/db-access/Cargo.toml
+++ b/examples/db-access/Cargo.toml
@@ -12,6 +12,8 @@ reth-db.workspace = true
 reth-primitives.workspace = true
 reth-provider.workspace = true
 reth-rpc-types.workspace = true
+reth-node-ethereum.workspace = true
+reth-node-types.workspace = true

 eyre.workspace = true
diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs
index 0a83688013..7886a3bdd2 100644
--- a/examples/db-access/src/main.rs
+++ b/examples/db-access/src/main.rs
@@ -1,4 +1,6 @@
 use reth_chainspec::ChainSpecBuilder;
+use reth_node_ethereum::EthereumNode;
+use reth_node_types::NodeTypesWithDBAdapter;
 use reth_primitives::{Address, B256};
 use reth_provider::{
     providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider,
@@ -21,12 +23,13 @@ fn main() -> eyre::Result<()> {
     // Instantiate a provider factory for Ethereum mainnet using the provided DB.
     // TODO: Should the DB version include the spec so that you do not need to specify it here?
     let spec = ChainSpecBuilder::mainnet().build();
-    let factory = ProviderFactory::new_with_database_path(
-        db_path,
-        spec.into(),
-        Default::default(),
-        StaticFileProvider::read_only(db_path.join("static_files"))?,
-    )?;
+    let factory =
+        ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, DatabaseEnv>>::new_with_database_path(
+            db_path,
+            spec.into(),
+            Default::default(),
+            StaticFileProvider::read_only(db_path.join("static_files"), false)?,
+        )?;

     // This call opens a RO transaction on the database. To write to the DB you'd need to call
     // the `provider_rw` function and look for the `Writer` variants of the traits.
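Editor's note: the db-access and rpc-db hunks above capture the core provider-API migration in this diff: `ProviderFactory` is now parameterized over node types (supplied via `NodeTypesWithDBAdapter`), and `StaticFileProvider::read_only` takes a second `watch` argument. The following standalone sketch is not part of the diff; it illustrates the post-change read-only access pattern, with a placeholder datadir path and an illustrative `BlockNumReader` query, and assumes the same crate re-exports the rpc-db example uses at this revision.

```rust
// Hypothetical read-only DB access with the post-change API (sketch, not from the diff).
use std::{path::Path, sync::Arc};

use reth::{
    api::NodeTypesWithDBAdapter,
    providers::{providers::StaticFileProvider, BlockNumReader, ProviderFactory},
    utils::open_db_read_only,
};
use reth_chainspec::ChainSpecBuilder;
use reth_db::{mdbx::DatabaseArguments, ClientVersion, DatabaseEnv};
use reth_node_ethereum::EthereumNode;

fn main() -> eyre::Result<()> {
    // Placeholder datadir layout: <datadir>/db and <datadir>/static_files.
    let datadir = Path::new("/path/to/datadir");
    let db = Arc::new(open_db_read_only(
        datadir.join("db").as_path(),
        DatabaseArguments::new(ClientVersion::default()),
    )?);
    let spec = Arc::new(ChainSpecBuilder::mainnet().build());

    // The node-types parameter now travels with the factory, and the second
    // argument of `read_only` controls whether the static file directory is
    // watched for changes (false = treat it as a fixed snapshot).
    let factory = ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, Arc<DatabaseEnv>>>::new(
        db,
        spec,
        StaticFileProvider::read_only(datadir.join("static_files"), false)?,
    );

    let provider = factory.provider()?;
    println!("latest block number: {}", provider.best_block_number()?);
    Ok(())
}
```

Passing `false` suits one-shot tooling over a stopped node's datadir; a long-running reader alongside a live node would pass `true`, as the rpc-db example below does.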
diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs
index 08b27d3ac4..40a7beb7c9 100644
--- a/examples/node-custom-rpc/src/main.rs
+++ b/examples/node-custom-rpc/src/main.rs
@@ -14,12 +14,12 @@
 use clap::Parser;
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
-use reth::cli::Cli;
+use reth::{args::utils::DefaultChainSpecParser, cli::Cli};
 use reth_node_ethereum::EthereumNode;
 use reth_transaction_pool::TransactionPool;

 fn main() {
-    Cli::<RethCliTxpoolExt>::parse()
+    Cli::<DefaultChainSpecParser, RethCliTxpoolExt>::parse()
         .run(|builder, args| async move {
             let handle = builder
                 .node(EthereumNode::default())
diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs
index 90bfaab35d..eabcfb2e71 100644
--- a/examples/polygon-p2p/src/chain_cfg.rs
+++ b/examples/polygon-p2p/src/chain_cfg.rs
@@ -1,5 +1,5 @@
 use reth_chainspec::{
-    BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition,
+    once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition,
 };
 use reth_discv4::NodeRecord;
 use reth_primitives::{b256, Head, B256};
@@ -15,7 +15,8 @@ pub(crate) fn polygon_chain_spec() -> Arc<ChainSpec> {
         chain: Chain::from_id(137),
         // genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"),
-        genesis_hash: Some(GENESIS),
+        genesis_hash: once_cell_set(GENESIS),
+        genesis_header: Default::default(),
         paris_block_and_final_difficulty: None,
         hardforks: ChainHardforks::new(vec![
             (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
diff --git a/examples/rpc-db/Cargo.toml b/examples/rpc-db/Cargo.toml
index 007a488b81..262b3df8ba 100644
--- a/examples/rpc-db/Cargo.toml
+++ b/examples/rpc-db/Cargo.toml
@@ -11,7 +11,6 @@ jsonrpsee.workspace = true
 reth.workspace = true
 reth-chainspec.workspace = true
 reth-db.workspace = true
-reth-db-api.workspace = true
 reth-node-ethereum.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
 tokio = { workspace = true, features = ["full"] }
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index 69f61fbd15..e7409417fb 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -15,6 +15,7 @@ use std::{path::Path, sync::Arc};
 use reth::{
+    api::NodeTypesWithDBAdapter,
     providers::{
         providers::{BlockchainProvider, StaticFileProvider},
         ProviderFactory,
     },
@@ -23,8 +24,7 @@ use reth::{
     utils::open_db_read_only,
 };
 use reth_chainspec::ChainSpecBuilder;
-use reth_db::mdbx::DatabaseArguments;
-use reth_db_api::models::ClientVersion;
+use reth_db::{mdbx::DatabaseArguments, ClientVersion, DatabaseEnv};

 // Bringing up the RPC
 use reth::rpc::builder::{
@@ -33,7 +33,7 @@ use reth::rpc::builder::{
 // Configuring the network parts, ideally also wouldn't need to think about this.
 use myrpc_ext::{MyRpcExt, MyRpcExtApiServer};
 use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor};
-use reth_node_ethereum::EthEvmConfig;
+use reth_node_ethereum::{EthEvmConfig, EthereumNode};
 use reth_provider::test_utils::TestCanonStateSubscriptions;

 // Custom rpc extension
@@ -49,10 +49,10 @@ async fn main() -> eyre::Result<()> {
         DatabaseArguments::new(ClientVersion::default()),
     )?);
     let spec = Arc::new(ChainSpecBuilder::mainnet().build());
-    let factory = ProviderFactory::new(
+    let factory = ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, Arc<DatabaseEnv>>>::new(
         db.clone(),
         spec.clone(),
-        StaticFileProvider::read_only(db_path.join("static_files"))?,
+        StaticFileProvider::read_only(db_path.join("static_files"), true)?,
     );

     // 2. Setup the blockchain provider using only the database provider and a noop for the tree to
@@ -66,7 +66,7 @@ async fn main() -> eyre::Result<()> {
         .with_noop_pool()
         .with_noop_network()
         .with_executor(TokioTaskExecutor::default())
-        .with_evm_config(EthEvmConfig::default())
+        .with_evm_config(EthEvmConfig::new(spec))
         .with_events(TestCanonStateSubscriptions::default());

     // Pick which namespaces to expose.
diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs
index 5505f6d16c..88ca2ac1a7 100644
--- a/examples/stateful-precompile/src/main.rs
+++ b/examples/stateful-precompile/src/main.rs
@@ -5,9 +5,10 @@
 use alloy_genesis::Genesis;
 use parking_lot::RwLock;
 use reth::{
+    api::NextBlockEnvAttributes,
     builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder},
     primitives::{
-        revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv},
+        revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv},
         Address, Bytes, U256,
     },
     revm::{
@@ -19,7 +20,7 @@ use reth::{
     tasks::TaskManager,
 };
 use reth_chainspec::{Chain, ChainSpec};
-use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes};
+use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes, NodeTypes};
 use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
 use reth_node_ethereum::{node::EthereumAddOns, EthEvmConfig, EthExecutorProvider, EthereumNode};
 use reth_primitives::{
@@ -50,13 +51,19 @@ pub struct PrecompileCache {
 }

 /// Custom EVM configuration
-#[derive(Debug, Clone, Default)]
+#[derive(Debug, Clone)]
 #[non_exhaustive]
 pub struct MyEvmConfig {
+    inner: EthEvmConfig,
     precompile_cache: Arc<RwLock<PrecompileCache>>,
 }

 impl MyEvmConfig {
+    /// Creates a new instance.
+    pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
+        Self { inner: EthEvmConfig::new(chain_spec), precompile_cache: Default::default() }
+    }
+
     /// Sets the precompiles to the EVM handler
     ///
     /// This will be invoked when the EVM is created via [ConfigureEvm::evm] or
@@ -138,28 +145,37 @@ impl StatefulPrecompileMut for WrappedPrecompile {
 }

 impl ConfigureEvmEnv for MyEvmConfig {
+    type Header = Header;
+
     fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
-        EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender)
+        self.inner.fill_tx_env(tx_env, transaction, sender)
+    }
+
+    fn fill_tx_env_system_contract_call(
+        &self,
+        env: &mut Env,
+        caller: Address,
+        contract: Address,
+        data: Bytes,
+    ) {
+        self.inner.fill_tx_env_system_contract_call(env, caller, contract, data)
     }

     fn fill_cfg_env(
         &self,
         cfg_env: &mut CfgEnvWithHandlerCfg,
-        chain_spec: &ChainSpec,
-        header: &Header,
+        header: &Self::Header,
         total_difficulty: U256,
     ) {
-        EthEvmConfig::default().fill_cfg_env(cfg_env, chain_spec, header, total_difficulty)
+        self.inner.fill_cfg_env(cfg_env, header, total_difficulty)
     }

-    fn fill_tx_env_system_contract_call(
+    fn next_cfg_and_block_env(
         &self,
-        env: &mut Env,
-        caller: Address,
-        contract: Address,
-        data: Bytes,
-    ) {
-        EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data)
+        parent: &Self::Header,
+        attributes: NextBlockEnvAttributes,
+    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+        self.inner.next_cfg_and_block_env(parent, attributes)
     }
 }

@@ -207,7 +223,7 @@ pub struct MyExecutorBuilder {
 impl<Node> ExecutorBuilder<Node> for MyExecutorBuilder
 where
-    Node: FullNodeTypes,
+    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>,
 {
     type EVM = MyEvmConfig;
     type Executor = EthExecutorProvider<Self::EVM>;
@@ -216,7 +232,10 @@ where
         self,
         ctx: &BuilderContext<Node>,
     ) -> eyre::Result<(Self::EVM, Self::Executor)> {
-        let evm_config = MyEvmConfig { precompile_cache: self.precompile_cache.clone() };
+        let evm_config = MyEvmConfig {
+            inner: EthEvmConfig::new(ctx.chain_spec()),
+            precompile_cache: self.precompile_cache.clone(),
+        };
         Ok((evm_config.clone(), EthExecutorProvider::new(ctx.chain_spec(), evm_config)))
     }
 }
diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs
index c9a14dee18..6dc7d132f4 100644
--- a/examples/txpool-tracing/src/main.rs
+++ b/examples/txpool-tracing/src/main.rs
@@ -13,6 +13,7 @@
 use clap::Parser;
 use futures_util::StreamExt;
 use reth::{
+    args::utils::DefaultChainSpecParser,
     builder::NodeHandle,
     cli::Cli,
     primitives::{Address, IntoRecoveredTransaction},
@@ -25,7 +26,7 @@ use reth::{
 use reth_node_ethereum::node::EthereumNode;

 fn main() {
-    Cli::<RethCliTxpoolExt>::parse()
+    Cli::<DefaultChainSpecParser, RethCliTxpoolExt>::parse()
         .run(|builder, args| async move {
             // launch the node
             let NodeHandle { node, node_exit_future } =
diff --git a/funding.json b/funding.json
new file mode 100644
index 0000000000..29f2e782aa
--- /dev/null
+++ b/funding.json
@@ -0,0 +1,5 @@
+{
+  "opRetro": {
+    "projectId": "0x5759249c433d67eeb2ca1b6ff827feec164b60b92e849d6ce0db0974cedc4a89"
+  }
+}
diff --git a/op.Dockerfile b/op.Dockerfile
index e06f1dd56c..b7c7e7f3f2 100644
--- a/op.Dockerfile
+++ b/op.Dockerfile
@@ -1,4 +1,4 @@
-FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef
+FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef
 WORKDIR /app

 LABEL org.opencontainers.image.source=https://github.com/bnb-chain/reth
@@ -28,11 +28,11 @@ ENV FEATURES $FEATURES
 RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config

 # Builds dependencies
-RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json
+RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json --manifest-path crates/optimism/bin/Cargo.toml

 # Build application
 COPY . .
-RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin op-reth
+RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml

 # ARG is not resolved in COPY so we have to hack around it by copying the
 # binary to a temporary location
diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml
index 74206884a1..ca23ffcce3 100644
--- a/testing/ef-tests/Cargo.toml
+++ b/testing/ef-tests/Cargo.toml
@@ -25,6 +25,7 @@ reth-stages.workspace = true
 reth-evm-ethereum.workspace = true

 alloy-rlp.workspace = true
+alloy-primitives.workspace = true

 walkdir = "2.3.3"
 serde.workspace = true
diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs
index e7556ec0d4..3021f8cb70 100644
--- a/testing/ef-tests/src/models.rs
+++ b/testing/ef-tests/src/models.rs
@@ -1,6 +1,7 @@
 //! Shared models for

 use crate::{assert::assert_equal, Error};
+use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
 use reth_chainspec::{ChainSpec, ChainSpecBuilder};
 use reth_db::tables;
 use reth_db_api::{
@@ -8,8 +9,8 @@ use reth_db_api::{
     transaction::{DbTx, DbTxMut},
 };
 use reth_primitives::{
-    keccak256, Account as RethAccount, Address, Bloom, Bytecode, Bytes, Header as RethHeader,
-    SealedHeader, StorageEntry, Withdrawals, B256, B64, U256,
+    keccak256, Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry,
+    Withdrawals,
 };
 use serde::Deserialize;
 use std::{collections::BTreeMap, ops::Deref};
diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml
index 182d01baf2..af592e294c 100644
--- a/testing/testing-utils/Cargo.toml
+++ b/testing/testing-utils/Cargo.toml
@@ -16,6 +16,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] }

 alloy-eips.workspace = true
 alloy-genesis.workspace = true
+alloy-primitives.workspace = true
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 853597f5d7..6e0fe32be2 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -3,14 +3,14 @@
 use alloy_eips::{
     eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest,
 };
+use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256};
 pub use rand::Rng;
 use rand::{
     distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng,
 };
 use reth_primitives::{
-    proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, Request,
-    Requests, SealedBlock, SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind,
-    TxLegacy, Withdrawal, Withdrawals, B256, U256,
+    proofs, sign_message, Account, Header, Log, Receipt, Request, Requests, SealedBlock,
+    SealedHeader, StorageEntry, Transaction, TransactionSigned, TxLegacy, Withdrawal, Withdrawals,
 };
 use secp256k1::{Keypair, Secp256k1};
 use std::{
diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs
index a146401f04..8a5adb3002 100644
--- a/testing/testing-utils/src/genesis_allocator.rs
+++ b/testing/testing-utils/src/genesis_allocator.rs
@@ -2,7 +2,8 @@
 //! signers to the genesis block.

 use alloy_genesis::GenesisAccount;
-use reth_primitives::{public_key_to_address, Address, Bytes, B256, U256};
+use alloy_primitives::{Address, Bytes, B256, U256};
+use reth_primitives::public_key_to_address;
 use secp256k1::{
     rand::{thread_rng, RngCore},
     Keypair, Secp256k1,