diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 871a3821acb..c6444017ba2 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -36,7 +36,6 @@ engine-api: [] # no fix due to https://github.com/paradigmxyz/reth/issues/8732 engine-cancun: - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) - - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) sync: [] diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml index 87670587395..5dcc418fe08 100644 --- a/.github/assets/kurtosis_op_network_params.yaml +++ b/.github/assets/kurtosis_op_network_params.yaml @@ -4,7 +4,6 @@ ethereum_package: el_extra_params: - "--rpc.eth-proof-window=100" cl_type: teku - cl_image: "consensys/teku:25.4.0" network_params: preset: minimal genesis_delay: 5 diff --git a/.github/workflows/build-release-binaries.yml b/.github/workflows/build-release-binaries.yml deleted file mode 100644 index 92b26406169..00000000000 --- a/.github/workflows/build-release-binaries.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: build release binaries - -on: - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -jobs: - build: - name: build release - runs-on: ${{ matrix.configs.os }} - strategy: - matrix: - configs: - - target: x86_64-unknown-linux-gnu - os: ubuntu-24.04 - profile: maxperf - - target: aarch64-unknown-linux-gnu - os: ubuntu-24.04 - profile: maxperf - - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - - target: x86_64-pc-windows-gnu - os: ubuntu-24.04 - profile: maxperf - build: - - command: build - binary: reth - - command: op-build - binary: op-reth - steps: - - uses: actions/checkout@v4 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - with: - target: ${{ matrix.configs.target }} - - name: Install cross main - id: cross_main - run: | - cargo install cross --git https://github.com/cross-rs/cross - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - - name: Apple M1 setup - if: matrix.configs.target == 'aarch64-apple-darwin' - run: | - echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV - echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - - - name: Build Reth - run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 788e0e60417..e088e0ce83b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -8,7 +8,6 @@ on: - v* env: - REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth CARGO_TERM_COLOR: always @@ -17,8 +16,45 @@ env: DOCKER_USERNAME: ${{ github.actor }} jobs: + build-rc: + if: contains(github.ref, '-rc') + name: build and push as release candidate + runs-on: ubuntu-24.04 + permissions: + packages: write + contents: read + strategy: + fail-fast: false + matrix: + build: + - name: "Build and push reth image" + command: "make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push" + - name: "Build and push op-reth image" + command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME 
PROFILE=maxperf op-docker-build-push" + steps: + - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Install cross main + id: cross_main + run: | + cargo install cross --git https://github.com/cross-rs/cross + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + - name: Set up Docker builder + run: | + docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 + docker buildx create --use --name cross-builder + - name: Build and push ${{ matrix.build.name }} + run: ${{ matrix.build.command }} + build: - name: build and push + if: ${{ !contains(github.ref, '-rc') }} + name: build and push as latest runs-on: ubuntu-24.04 permissions: packages: write @@ -27,14 +63,10 @@ jobs: fail-fast: false matrix: build: - - name: 'Build and push reth image' - command: 'make PROFILE=maxperf docker-build-push' - - name: 'Build and push reth image, tag as "latest"' - command: 'make PROFILE=maxperf docker-build-push-latest' - - name: 'Build and push op-reth image' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push' - - name: 'Build and push op-reth image, tag as "latest"' - command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest' + - name: "Build and push reth image" + command: "make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest" + - name: "Build and push op-reth image" + command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest" steps: - uses: actions/checkout@v4 - uses: rui314/setup-mold@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7442eef3059..87c85561478 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -43,28 +43,56 @@ jobs: outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} + check-version: + name: check version + runs-on: ubuntu-latest + needs: extract-version + if: ${{ github.event.inputs.dry_run != 'true' }} + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Verify crate version matches tag + # Check that the tag starts with the Cargo version, + # so that Cargo version 1.4.8 can be matched against both v1.4.8 and v1.4.8-rc.1 + run: | + tag="${{ needs.extract-version.outputs.VERSION }}" + tag=${tag#v} + cargo_ver=$(cargo metadata --no-deps --format-version 1 | jq -r '.packages[0].version') + [[ "$tag" == "$cargo_ver"* ]] || { echo "Tag $tag doesn't match the Cargo version $cargo_ver"; exit 1; } + build: name: build release runs-on: ${{ matrix.configs.os }} needs: extract-version + continue-on-error: ${{ matrix.configs.allow_fail }} strategy: + fail-fast: true matrix: configs: - target: x86_64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf + allow_fail: false - target: aarch64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf + allow_fail: false - target: x86_64-apple-darwin os: macos-13 profile: maxperf + allow_fail: false - target: aarch64-apple-darwin os: macos-14 profile: maxperf + allow_fail: false - target: x86_64-pc-windows-gnu os: ubuntu-24.04 profile: maxperf + allow_fail: false + - target: riscv64gc-unknown-linux-gnu + os: ubuntu-24.04 + profile: maxperf + allow_fail: true build: - 
command: build binary: reth @@ -127,8 +155,8 @@ jobs: draft-release: name: draft release - needs: [build, extract-version] runs-on: ubuntu-latest + needs: [build, extract-version] if: ${{ github.event.inputs.dry_run != 'true' }} env: VERSION: ${{ needs.extract-version.outputs.VERSION }} @@ -143,6 +171,12 @@ jobs: fetch-depth: 0 - name: Download artifacts uses: actions/download-artifact@v4 + - name: Generate full changelog + id: changelog + run: | + echo "CHANGELOG<<EOF" >> $GITHUB_OUTPUT + echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT - name: Create release draft env: GITHUB_USER: ${{ github.repository_owner }} # The formatting here is borrowed from Lighthouse (which is borrowed from OpenEthereum): # https://github.com/openethereum/openethereum/blob/6c2d392d867b058ff867c4373e40850ca3f96969/.github/workflows/build.yml run: | + prerelease_flag="" + if [[ "${GITHUB_REF}" == *-rc* ]]; then + prerelease_flag="--prerelease" + fi + body=$(cat <<- "ENDBODY" ![image](https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-prod.png) @@ -186,6 +225,10 @@ jobs: *See [Update Priorities](https://paradigmxyz.github.io/reth/installation/priorities.html) for more information about this table.* + ## All Changes + + ${{ steps.changelog.outputs.CHANGELOG }} + ## Binaries [See pre-built binaries documentation.](https://paradigmxyz.github.io/reth/installation/binaries.html) @@ -220,12 +263,12 @@ jobs: assets+=("$asset/$asset") done tag_name="${{ env.VERSION }}" - echo "$body" | gh release create --draft -t "Reth $tag_name" -F "-" "$tag_name" "${assets[@]}" + echo "$body" | gh release create --draft $prerelease_flag -t "Reth $tag_name" -F "-" "$tag_name" "${assets[@]}" dry-run-summary: name: dry run summary - needs: [build, extract-version] runs-on: ubuntu-latest + needs: [build, extract-version] if: ${{ github.event.inputs.dry_run == 'true' }} env: VERSION: ${{ needs.extract-version.outputs.VERSION }} @@ -242,4 +285,4 @@ jobs: echo "- A draft release would be created" echo "" echo "### Next Steps" - echo "To perform a real release, push a git tag." + echo "To perform a real release, push a git tag." 
\ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index e7771e86666..150e23fd2cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7329eb72d95576dfb8813175bcf671198fb24266b0b3e520052a513e30c284df" +checksum = "ad451f9a70c341d951bca4e811d74dbe1e193897acd17e9dbac1353698cc430b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -152,9 +152,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1658352ca9425d7b5bbb3ae364bc276ab18d4afae06f5faf00377b6964fdf68" +checksum = "ebf25443920ecb9728cb087fe4dc04a0b290bd6ac85638c58fe94aba70f1a44e" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -234,9 +234,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa190bfa5340aee544ac831114876fa73bc8da487095b49a5ea153a6a4656ea" +checksum = "3056872f6da48046913e76edb5ddced272861f6032f09461aea1a2497be5ae5d" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -276,9 +276,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b81b2dfd278d58af8bfde8753fa4685407ba8fbad8bc88a2bb0e065eed48478" +checksum = "c98fb40f07997529235cc474de814cd7bd9de561e101716289095696c0e4639d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -315,9 +315,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ab2dba5dca01ad4281b4d4726a18e2012a20e3950bfc2a90c5376840555366" +checksum = "dc08b31ebf9273839bd9a01f9333cbb7a3abb4e820c312ade349dd18bdc79581" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -329,9 +329,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ed07e76fbc72790a911ea100cdfbe85b1f12a097c91b948042e854959d140e" +checksum = "ed117b08f0cc190312bf0c38c34cf4f0dabfb4ea8f330071c587cd7160a88cb2" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -355,9 +355,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05aa52713c376f797b3c7077708585f22a5c3053a7b1b2b355ea98edeb2052d" +checksum = "c7162ff7be8649c0c391f4e248d1273e85c62076703a1f3ec7daf76b283d886d" dependencies = [ "alloy-consensus", "alloy-eips", @@ -427,9 +427,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a3f7a59c276c6e410267e77a166f9297dbe74e4605f1abf625e29d85c53144" +checksum = "d84eba1fd8b6fe8b02f2acd5dd7033d0f179e304bd722d11e817db570d1fa6c4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -470,9 +470,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc3cbf02fdedbec7aadc7a77080b6b143752fa792c7fd49b86fd854257688bd4" +checksum = "8550f7306e0230fc835eb2ff4af0a96362db4b6fc3f25767d161e0ad0ac765bf" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f185483536cbcbf55971077140e03548dad4f3a4ddb35044bcdc01b8f02ce1" +checksum = "518a699422a3eab800f3dac2130d8f2edba8e4fff267b27a9c7dc6a2b0d313ee" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -541,9 +541,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "347dfd77ba4d74886dba9e2872ff64fb246001b08868d27baec94e7248503e08" +checksum = "c000cab4ec26a4b3e29d144e999e1c539c2fa0abed871bf90311eb3466187ca8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -554,9 +554,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a743533bf15911763f92ea6b27fe934c0f459e216f1a53299114a9098cfd950b" +checksum = "3ebdc864f573645c5288370c208912b85b5cacc8025b700c50c2b74d06ab9830" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -566,9 +566,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e15bd6456742d6dcadacf3cd238a90a8a7aa9f00bc7cc641ae272f5d3f5d4f" +checksum = "8abecc34549a208b5f91bc7f02df3205c36e2aa6586f1d9375c3382da1066b3b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -589,9 +589,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb8f659fb7d27c86c1ba2449c6e4a6b5056be7ab6481cb33fdc3b990c34753a2" +checksum = "241aba7808bddc3ad1c6228e296a831f326f89118b1017012090709782a13334" dependencies = [ "alloy-eips", "alloy-primitives", @@ -607,9 +607,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe8bc37b23e788c0f8081a7eec34fd439cfa8d4f137f6e987803fb2a733866ca" +checksum = "8c832f2e851801093928dbb4b7bd83cd22270faf76b2e080646b806a285c8757" dependencies = [ "alloy-primitives", "serde", @@ -617,9 +617,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bcf49fe91b3d621440dcc5bb067afaeba5ca4b07f59e42fb7af42944146a8c0" +checksum = "cab52691970553d84879d777419fa7b6a2e92e9fe8641f9324cc071008c2f656" dependencies = [ "alloy-consensus", "alloy-eips", @@ -638,9 +638,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d9b4293dfd4721781d33ee40de060376932d4a55d421cf6618ad66ff97cc52" +checksum = "fcaf7dff0fdd756a714d58014f4f8354a1706ebf9fa2cf73431e0aeec3c9431e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -659,9 +659,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.7" +version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f889b67620493ff1a2db571c7d5e7faa2ef8e6bc7747e5491ae448c96e2e75e0" +checksum = "18bd1c5d7b9f3f1caeeaa1c082aa28ba7ce2d67127b12b2a9b462712c8f6e1c5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -674,9 +674,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f68f020452c0d570b4eee22d4ffda9e4eda68ebcf67e1199d6dff48097f442b" +checksum = "6e3507a04e868dd83219ad3cd6a8c58aefccb64d33f426b3934423a206343e84" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -688,9 +688,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62a82f15f296c2c83c55519d21ca07801fb58b118878b0f4777250968e49f4fe" +checksum = "eec36272621c3ac82b47dd77f0508346687730b1c2e3e10d3715705c217c0a05" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -700,9 +700,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7d927aa39ca51545ae4c9cf4bdb2cbc1f6b46ab4b54afc3ed9255f93eedbce" +checksum = "730e8f2edf2fc224cabd1c25d090e1655fa6137b2e409f92e5eec735903f1507" dependencies = [ "alloy-primitives", "arbitrary", @@ -712,9 +712,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c63771b50008d2b079187e9e74a08235ab16ecaf4609b4eb895e2890a3bcd465" +checksum = "6b0d2428445ec13edc711909e023d7779618504c4800be055a5b940025dbafe3" dependencies = [ "alloy-primitives", "async-trait", @@ -727,9 +727,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db906294ee7876bd332cd760f460d30de183554434e07fc19d7d54e16a7aeaf0" +checksum = "e14fe6fedb7fe6e0dfae47fe020684f1d8e063274ef14bca387ddb7a6efa8ec1" dependencies = [ "alloy-consensus", "alloy-network", @@ -815,9 +815,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca9b645fe4f4e6582cfbb4a8d20cedcf5aa23548e92eacbdacac6278b425e023" +checksum = "a712bdfeff42401a7dd9518f72f617574c36226a9b5414537fedc34350b73bf9" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -838,9 +838,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee18869ecabe658ff6316e7db7c25d958c7d10f0a1723c2f7447d4f402920b66" +checksum = "7ea5a76d7f2572174a382aedf36875bedf60bcc41116c9f031cf08040703a2dc" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -859,9 +859,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff95f0b3a3bd2b80a53a52f7649ea6ef3e7e91ff4bd439871199ec68b1b69038" +checksum = "606af17a7e064d219746f6d2625676122c79d78bf73dfe746d6db9ecd7dbcb85" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -879,9 +879,9 @@ dependencies = [ [[package]] name = 
"alloy-transport-ws" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c838e7562d16110fba3590a20c7765281c1a302cf567e1806d0c3ce1352b58" +checksum = "e0c6f9b37cd8d44aab959613966cc9d4d7a9b429c575cec43b3e5b46ea109a79" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -2276,7 +2276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -3023,7 +3023,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3167,7 +3167,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3396,11 +3396,11 @@ dependencies = [ "op-revm", "reth-chain-state", "reth-codecs", + "reth-db-api", "reth-ethereum", "reth-network-peers", "reth-node-builder", "reth-op", - "reth-optimism-consensus", "reth-optimism-forks", "reth-payload-builder", "reth-rpc-api", @@ -3456,6 +3456,30 @@ dependencies = [ "reth-ethereum", ] +[[package]] +name = "example-engine-api-access" +version = "0.0.0" +dependencies = [ + "alloy-rpc-types-engine", + "async-trait", + "clap", + "eyre", + "futures", + "jsonrpsee", + "reth-db", + "reth-node-api", + "reth-node-builder", + "reth-optimism-chainspec", + "reth-optimism-consensus", + "reth-optimism-node", + "reth-provider", + "reth-rpc-api", + "reth-tasks", + "reth-tracing", + "serde_json", + "tokio", +] + [[package]] name = "example-exex-hello-world" version = "0.0.0" @@ -3597,7 +3621,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "clap", @@ -4422,7 +4446,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.57.0", + "windows-core 0.61.2", ] [[package]] @@ -4867,7 +4891,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi 0.5.1", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5997,9 +6021,9 @@ dependencies = [ [[package]] name = "op-alloy-flz" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef71f23a8caf6f2a2d5cafbdc44956d44e6014dcb9aa58abf7e4e6481c6ec34" +checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" @@ -6069,7 +6093,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.4.3" +version = "1.4.8" dependencies = [ "clap", "reth-cli-util", @@ -6821,7 +6845,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7168,7 +7192,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7216,7 +7240,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7239,7 +7263,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", 
"alloy-json-rpc", @@ -7277,7 +7301,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7300,6 +7324,7 @@ dependencies = [ "reth-trie", "revm-database", "revm-state", + "serde", "tokio", "tokio-stream", "tracing", @@ -7307,7 +7332,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7327,7 +7352,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-genesis", "clap", @@ -7340,7 +7365,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.4.3" +version = "1.4.8" dependencies = [ "ahash", "alloy-chains", @@ -7418,7 +7443,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.4.3" +version = "1.4.8" dependencies = [ "reth-tasks", "tokio", @@ -7427,7 +7452,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7447,7 +7472,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7471,7 +7496,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.4.3" +version = "1.4.8" dependencies = [ "convert_case", "proc-macro2", @@ -7482,7 +7507,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "eyre", @@ -7494,11 +7519,12 @@ dependencies = [ "serde", "tempfile", "toml", + "url", ] [[package]] name = "reth-consensus" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7510,7 +7536,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7524,7 +7550,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7547,7 +7573,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7580,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7611,7 +7637,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7640,7 +7666,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7657,7 +7683,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7684,7 +7710,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7709,7 +7735,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7737,7 +7763,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7776,7 +7802,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.4.3" 
+version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7823,7 +7849,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.4.3" +version = "1.4.8" dependencies = [ "aes", "alloy-primitives", @@ -7853,7 +7879,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7862,19 +7888,12 @@ dependencies = [ "futures-util", "op-alloy-rpc-types-engine", "reth-chainspec", - "reth-consensus", "reth-engine-primitives", - "reth-engine-service", - "reth-engine-tree", "reth-ethereum-engine-primitives", - "reth-evm", - "reth-node-types", "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", "reth-provider", - "reth-prune", - "reth-stages-api", "reth-transaction-pool", "scroll-alloy-rpc-types-engine", "tokio", @@ -7884,9 +7903,10 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "auto_impl", @@ -7907,7 +7927,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.4.3" +version = "1.4.8" dependencies = [ "futures", "pin-project", @@ -7937,7 +7957,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7949,6 +7969,7 @@ dependencies = [ "codspeed-criterion-compat", "crossbeam-channel", "derive_more", + "eyre", "futures", "itertools 0.14.0", "metrics", @@ -7963,6 +7984,7 @@ dependencies = [ "reth-consensus", "reth-db", "reth-db-common", + "reth-e2e-test-utils", "reth-engine-primitives", "reth-errors", "reth-ethereum-consensus", @@ -7996,6 +8018,7 @@ dependencies = [ "revm-primitives", "revm-state", "schnellru", + "serde_json", "thiserror 2.0.12", "tokio", "tracing", @@ -8003,7 +8026,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8029,7 +8052,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8051,7 +8074,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "bytes", @@ -8068,7 +8091,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "bytes", @@ -8092,7 +8115,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.4.3" +version = "1.4.8" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8102,7 +8125,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8140,7 +8163,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8165,11 +8188,12 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", "reth-chainspec", + "reth-cli-util", "reth-consensus", "reth-consensus-common", "reth-db", @@ -8201,7 +8225,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8260,7 +8284,7 @@ dependencies = [ [[package]] name = 
"reth-ethereum-consensus" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8276,7 +8300,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8294,7 +8318,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8307,7 +8331,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8333,7 +8357,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8358,7 +8382,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "rayon", @@ -8368,7 +8392,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8394,7 +8418,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8416,7 +8440,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8428,7 +8452,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8448,7 +8472,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8492,7 +8516,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "eyre", @@ -8524,7 +8548,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8541,7 +8565,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "serde", "serde_json", @@ -8550,7 +8574,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8577,7 +8601,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.4.3" +version = "1.4.8" dependencies = [ "bytes", "futures", @@ -8599,7 +8623,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.4.3" +version = "1.4.8" dependencies = [ "bitflags 2.9.1", "byteorder", @@ -8618,7 +8642,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.4.3" +version = "1.4.8" dependencies = [ "bindgen", "cc", @@ -8626,7 +8650,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.4.3" +version = "1.4.8" dependencies = [ "futures", "metrics", @@ -8637,14 +8661,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.4.3" +version = "1.4.8" dependencies = [ "futures-util", "if-addrs", @@ -8658,7 +8682,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8719,7 +8743,7 @@ dependencies = [ [[package]] name = "reth-network-api" 
-version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8743,7 +8767,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8765,7 +8789,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8782,7 +8806,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8795,7 +8819,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.4.3" +version = "1.4.8" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8813,7 +8837,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8836,7 +8860,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8901,7 +8925,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8946,13 +8970,14 @@ dependencies = [ "tokio", "toml", "tracing", + "url", "vergen", "vergen-git2", ] [[package]] name = "reth-node-ethereum" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9005,7 +9030,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9028,7 +9053,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.4.3" +version = "1.4.8" dependencies = [ "eyre", "http", @@ -9050,7 +9075,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9062,9 +9087,10 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.4.3" +version = "1.4.8" dependencies = [ "reth-chainspec", + "reth-cli-util", "reth-consensus", "reth-consensus-common", "reth-db", @@ -9098,7 +9124,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9124,7 +9150,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9172,7 +9198,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9204,7 +9230,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9230,7 +9256,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9240,7 +9266,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9274,6 +9300,7 @@ dependencies = [ "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", + "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", "reth-payload-util", @@ -9299,7 +9326,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.4.3" +version = "1.4.8" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -9337,7 +9364,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9364,7 +9391,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9382,6 +9409,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-core", "jsonrpsee-types", + "metrics", "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-jsonrpsee", @@ -9393,6 +9421,7 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-evm", + "reth-metrics", "reth-network-api", "reth-node-api", "reth-node-builder", @@ -9420,16 +9449,17 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-codecs", "reth-db-api", - "reth-optimism-forks", + "reth-node-api", "reth-optimism-primitives", "reth-primitives-traits", + "reth-provider", "reth-prune-types", "reth-stages-types", "reth-storage-api", @@ -9437,7 +9467,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9474,7 +9504,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9494,7 +9524,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9505,7 +9535,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9525,7 +9555,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9534,7 +9564,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9543,7 +9573,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9565,7 +9595,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9603,7 +9633,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9652,7 +9682,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9684,7 +9714,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -9703,7 +9733,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9729,7 +9759,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9755,7 +9785,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9769,7 +9799,7 @@ dependencies 
= [ [[package]] name = "reth-rpc" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9830,11 +9860,13 @@ dependencies = [ "reth-tasks", "reth-testing-utils", "reth-transaction-pool", + "reth-trie-common", "revm", "revm-inspectors", "revm-primitives", "serde", "serde_json", + "sha2 0.10.9", "thiserror 2.0.12", "tokio", "tokio-stream", @@ -9845,7 +9877,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-genesis", @@ -9863,14 +9895,16 @@ dependencies = [ "alloy-rpc-types-txpool", "alloy-serde", "jsonrpsee", + "reth-chain-state", "reth-engine-primitives", "reth-network-peers", "reth-rpc-eth-api", + "reth-trie-common", ] [[package]] name = "reth-rpc-api-testing-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9889,7 +9923,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-network", @@ -9944,7 +9978,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9980,7 +10014,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10022,7 +10056,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10064,7 +10098,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10081,7 +10115,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10096,19 +10130,28 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", + "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", "jsonrpsee-types", + "op-alloy-consensus", + "op-alloy-rpc-types", + "reth-optimism-primitives", "reth-primitives-traits", + "reth-scroll-primitives", + "reth-storage-api", + "scroll-alloy-consensus", + "scroll-alloy-rpc-types", "serde", + "thiserror 2.0.12", ] [[package]] name = "reth-scroll-chainspec" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10131,7 +10174,7 @@ dependencies = [ [[package]] name = "reth-scroll-cli" -version = "1.4.3" +version = "1.4.8" dependencies = [ "clap", "eyre", @@ -10155,7 +10198,7 @@ dependencies = [ [[package]] name = "reth-scroll-consensus" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10174,7 +10217,7 @@ dependencies = [ [[package]] name = "reth-scroll-engine-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10201,7 +10244,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10229,7 +10272,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10242,7 +10285,7 @@ dependencies = [ [[package]] name = "reth-scroll-node" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10297,7 +10340,7 @@ 
dependencies = [ [[package]] name = "reth-scroll-payload" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10330,37 +10373,28 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-evm", "alloy-primitives", "alloy-rlp", "arbitrary", "bytes", - "derive_more", "modular-bitfield", "once_cell", - "proptest", - "proptest-arbitrary-interop", - "rand 0.8.5", "rand 0.9.1", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", - "revm-context", - "revm-scroll", "rstest", "scroll-alloy-consensus", - "scroll-alloy-evm", - "secp256k1", "serde", ] [[package]] name = "reth-scroll-rpc" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10396,7 +10430,7 @@ dependencies = [ [[package]] name = "reth-scroll-txpool" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10425,7 +10459,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10435,6 +10469,7 @@ dependencies = [ "bincode 1.3.3", "blake3", "codspeed-criterion-compat", + "eyre", "futures-util", "itertools 0.14.0", "num-traits", @@ -10449,6 +10484,9 @@ dependencies = [ "reth-db", "reth-db-api", "reth-downloaders", + "reth-era", + "reth-era-downloader", + "reth-era-utils", "reth-ethereum-consensus", "reth-ethereum-primitives", "reth-etl", @@ -10482,7 +10520,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10511,7 +10549,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -10528,7 +10566,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10553,7 +10591,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "assert_matches", @@ -10577,7 +10615,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "clap", @@ -10589,7 +10627,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10612,7 +10650,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10627,7 +10665,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "auto_impl", "dyn-clone", @@ -10644,7 +10682,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10659,7 +10697,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.4.3" +version = "1.4.8" dependencies = [ "tokio", "tokio-stream", @@ -10668,7 +10706,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.4.3" +version = "1.4.8" dependencies = [ "clap", "eyre", @@ -10682,7 +10720,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.4.3" +version = "1.4.8" dependencies = [ "opentelemetry", "opentelemetry-otlp", @@ -10695,7 +10733,7 @@ 
dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10741,7 +10779,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10773,7 +10811,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10805,7 +10843,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10831,7 +10869,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10860,7 +10898,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10892,7 +10930,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.4.3" +version = "1.4.8" dependencies = [ "zstd", ] @@ -11328,7 +11366,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11341,7 +11379,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11408,7 +11446,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11501,7 +11539,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scroll-alloy-consensus" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11519,12 +11557,13 @@ dependencies = [ "reth-codecs-derive", "serde", "serde_json", + "serde_with", "test-fuzz", ] [[package]] name = "scroll-alloy-evm" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11541,7 +11580,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-hardforks", "auto_impl", @@ -11550,7 +11589,7 @@ dependencies = [ [[package]] name = "scroll-alloy-network" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-network", @@ -11564,7 +11603,7 @@ dependencies = [ [[package]] name = "scroll-alloy-provider" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -11606,7 +11645,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11624,7 +11663,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types-engine" -version = "1.4.3" +version = "1.4.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -11635,7 +11674,7 @@ dependencies = [ [[package]] name = "scroll-reth" -version = "1.4.3" +version = "1.4.8" dependencies = [ "clap", "reth-cli-util", @@ -12316,7 +12355,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.7", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12973,7 +13012,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69fff37da548239c3bf9e64a12193d261e8b22b660991c6fd2df057c168f435f" dependencies = [ "cc", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ 
-13519,7 +13558,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 90ef7790cd6..c07d8a31a88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.4.3" +version = "1.4.8" edition = "2021" rust-version = "1.86" license = "MIT OR Apache-2.0" @@ -166,6 +166,7 @@ members = [ "examples/custom-rlpx-subprotocol", "examples/custom-node", "examples/db-access", + "examples/engine-api-access", "examples/exex-hello-world", "examples/exex-subscription", "examples/exex-test", @@ -491,33 +492,33 @@ alloy-trie = { version = "0.8.1", default-features = false } alloy-hardforks = "0.2.2" -alloy-consensus = { version = "1.0.7", default-features = false } -alloy-contract = { version = "1.0.7", default-features = false } -alloy-eips = { version = "1.0.7", default-features = false } -alloy-genesis = { version = "1.0.7", default-features = false } -alloy-json-rpc = { version = "1.0.7", default-features = false } -alloy-network = { version = "1.0.7", default-features = false } -alloy-network-primitives = { version = "1.0.7", default-features = false } -alloy-provider = { version = "1.0.7", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.7", default-features = false } -alloy-rpc-client = { version = "1.0.7", default-features = false } -alloy-rpc-types = { version = "1.0.7", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.7", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.7", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.7", default-features = false } -alloy-rpc-types-debug = { version = "1.0.7", default-features = false } -alloy-rpc-types-engine = { version = "1.0.7", default-features = false } -alloy-rpc-types-eth = { version = "1.0.7", default-features = false } -alloy-rpc-types-mev = { version = "1.0.7", default-features = false } -alloy-rpc-types-trace = { version = "1.0.7", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.7", default-features = false } -alloy-serde = { version = "1.0.7", default-features = false } -alloy-signer = { version = "1.0.7", default-features = false } -alloy-signer-local = { version = "1.0.7", default-features = false } -alloy-transport = { version = "1.0.7" } -alloy-transport-http = { version = "1.0.7", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.7", default-features = false } -alloy-transport-ws = { version = "1.0.7", default-features = false } +alloy-consensus = { version = "1.0.9", default-features = false } +alloy-contract = { version = "1.0.9", default-features = false } +alloy-eips = { version = "1.0.9", default-features = false } +alloy-genesis = { version = "1.0.9", default-features = false } +alloy-json-rpc = { version = "1.0.9", default-features = false } +alloy-network = { version = "1.0.9", default-features = false } +alloy-network-primitives = { version = "1.0.9", default-features = false } +alloy-provider = { version = "1.0.9", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.9", default-features = false } +alloy-rpc-client = { version = "1.0.9", default-features = false } +alloy-rpc-types = { version = "1.0.9", features = ["eth"], default-features = false } 
+alloy-rpc-types-admin = { version = "1.0.9", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.9", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.9", default-features = false } +alloy-rpc-types-debug = { version = "1.0.9", default-features = false } +alloy-rpc-types-engine = { version = "1.0.9", default-features = false } +alloy-rpc-types-eth = { version = "1.0.9", default-features = false } +alloy-rpc-types-mev = { version = "1.0.9", default-features = false } +alloy-rpc-types-trace = { version = "1.0.9", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.9", default-features = false } +alloy-serde = { version = "1.0.9", default-features = false } +alloy-signer = { version = "1.0.9", default-features = false } +alloy-signer-local = { version = "1.0.9", default-features = false } +alloy-transport = { version = "1.0.9" } +alloy-transport-http = { version = "1.0.9", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.9", default-features = false } +alloy-transport-ws = { version = "1.0.9", default-features = false } # scroll scroll-alloy-consensus = { path = "crates/scroll/alloy/consensus", default-features = false } @@ -550,7 +551,7 @@ op-alloy-rpc-types-engine = { version = "0.17.2", default-features = false } op-alloy-network = { version = "0.17.2", default-features = false } op-alloy-consensus = { version = "0.17.2", default-features = false } op-alloy-rpc-jsonrpsee = { version = "0.17.2", default-features = false } -op-alloy-flz = { version = "0.13.0", default-features = false } +op-alloy-flz = { version = "0.13.1", default-features = false } # misc aquamarine = "0.6" diff --git a/Cross.toml b/Cross.toml index f180166df27..eff8da32fde 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,14 @@ [build] pre-build = [ + # Use HTTPS for package sources + "apt-get update && apt-get install --assume-yes --no-install-recommends ca-certificates", + "find /etc/apt/ -type f \\( -name '*.list' -o -name '*.sources' \\) -exec sed -i 's|http://|https://|g' {} +", + + # Configure APT retries and timeouts to handle network issues + "echo 'Acquire::Retries \"3\";' > /etc/apt/apt.conf.d/80-retries", + "echo 'Acquire::http::Timeout \"60\";' >> /etc/apt/apt.conf.d/80-retries", + "echo 'Acquire::ftp::Timeout \"60\";' >> /etc/apt/apt.conf.d/80-retries", + # rust-bindgen dependencies: llvm-dev libclang-dev (>= 10) clang (>= 10) # See: https://github.com/cross-rs/cross/wiki/FAQ#using-clang--bindgen for # recommended clang versions for the given cross and bindgen version. @@ -15,5 +24,24 @@ pre-build = [ # Inspired by https://github.com/cross-rs/cross/blob/9e2298e17170655342d3248a9c8ac37ef92ba38f/docker/Dockerfile.x86_64-pc-windows-gnu#L51 dockerfile = "./Dockerfile.x86_64-pc-windows-gnu" +[target.riscv64gc-unknown-linux-gnu] +image = "ubuntu:24.04" +pre-build = [ + "apt update", + "apt install --yes gcc gcc-riscv64-linux-gnu libclang-dev make", +] +env.passthrough = [ + "CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER=riscv64-linux-gnu-gcc", +] + +[target.x86_64-pc-windows-gnu] +# Why do we need a custom Dockerfile on Windows: +# 1. `reth-libmdbx` stopped working with MinGW 9.3 that cross image comes with. +# 2. To be able to update the version of MinGW, we need to also update the Ubuntu that the image is based on. 
+#
+# Also see https://github.com/cross-rs/cross/issues/1667
+# Inspired by https://github.com/cross-rs/cross/blob/9e2298e17170655342d3248a9c8ac37ef92ba38f/docker/Dockerfile.x86_64-pc-windows-gnu#L51
+dockerfile = "./Dockerfile.x86_64-pc-windows-gnu"
+
 [build.env]
 passthrough = ["JEMALLOC_SYS_WITH_LG_PAGE"]
diff --git a/Dockerfile b/Dockerfile
index beea3301cfe..fc97c160bbc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -33,7 +33,7 @@ ENV FEATURES=$FEATURES
 RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json

 # Build application
-COPY --exclude=.git --exclude=dist . .
+COPY --exclude=dist . .
 RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin reth

 # ARG is not resolved in COPY so we have to hack around it by copying the
diff --git a/Dockerfile.x86_64-pc-windows-gnu b/Dockerfile.x86_64-pc-windows-gnu
index 6e30eacf7c2..e4b5a531abe 100644
--- a/Dockerfile.x86_64-pc-windows-gnu
+++ b/Dockerfile.x86_64-pc-windows-gnu
@@ -1,7 +1,19 @@
 FROM ubuntu:24.04 AS cross-base
 ENV DEBIAN_FRONTEND=noninteractive

-RUN apt-get update && apt-get install --assume-yes --no-install-recommends git ca-certificates
+# Use HTTPS for package sources
+RUN apt-get update && apt-get install --assume-yes --no-install-recommends ca-certificates
+RUN find /etc/apt/ -type f \( -name '*.list' -o -name '*.sources' \) -exec sed -i 's|http://|https://|g' {} +
+
+# Configure APT retries and timeouts to handle network issues
+RUN echo 'Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries && \
+    echo 'Acquire::http::Timeout "60";' >> /etc/apt/apt.conf.d/80-retries && \
+    echo 'Acquire::ftp::Timeout "60";' >> /etc/apt/apt.conf.d/80-retries
+
+# Configure fallback mirrors
+RUN sed -i 's|URIs: https://archive.ubuntu.com/ubuntu/|URIs: https://mirror.cov.ukservers.com/ubuntu/ https://archive.ubuntu.com/ubuntu/ https://mirror.ox.ac.uk/sites/archive.ubuntu.com/ubuntu/|g' /etc/apt/sources.list.d/ubuntu.sources
+
+RUN apt-get update && apt-get install --assume-yes --no-install-recommends git

 RUN git clone https://github.com/cross-rs/cross /cross
 WORKDIR /cross/docker
@@ -15,10 +27,15 @@ FROM cross-base AS build

 RUN apt-get install --assume-yes --no-install-recommends libz-mingw-w64-dev g++-mingw-w64-x86-64 gfortran-mingw-w64-x86-64

+# Install Wine from the OpenSUSE repository because the official one often lags behind
 RUN dpkg --add-architecture i386 && \
     apt-get install --assume-yes --no-install-recommends wget gpg && \
-    mkdir -pm755 /etc/apt/keyrings && wget -O - https://dl.winehq.org/wine-builds/winehq.key | gpg --dearmor -o /etc/apt/keyrings/winehq-archive.key - && \
-    wget -NP /etc/apt/sources.list.d/ https://dl.winehq.org/wine-builds/ubuntu/dists/noble/winehq-noble.sources && \
+    mkdir -pm755 /etc/apt/keyrings && wget -qO- \
+    https://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_24.04/Release.key \
+    | tee /etc/apt/keyrings/obs-winehq.key >/dev/null && \
+    echo "deb [arch=amd64,i386 signed-by=/etc/apt/keyrings/obs-winehq.key] \
+    https://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_24.04/ ./" \
+    | tee /etc/apt/sources.list.d/obs-winehq.list && \
+    apt-get update && apt-get install --assume-yes --install-recommends winehq-stable

 # run-detectors are responsible for calling the correct interpreter for exe
diff --git a/Makefile b/Makefile
index a702d463b2c..de37ed49fc5 100644
--- a/Makefile
+++ b/Makefile
@@ -213,6 +213,18 @@ $(EF_TESTS_DIR):
 ef-tests: $(EF_TESTS_DIR) ## Runs Ethereum Foundation tests.
    cargo nextest run -p ef-tests --features ef-tests

+##@ reth-bench
+
+.PHONY: reth-bench
+reth-bench: ## Build the reth-bench binary into the `target` directory.
+   cargo build --manifest-path bin/reth-bench/Cargo.toml --features "$(FEATURES)" --profile "$(PROFILE)"
+
+.PHONY: install-reth-bench
+install-reth-bench: ## Build and install the reth-bench binary under `~/.cargo/bin`.
+   cargo install --path bin/reth-bench --bin reth-bench --force --locked \
+       --features "$(FEATURES)" \
+       --profile "$(PROFILE)"
+
 ##@ Docker

 # Note: This requires a buildx builder with emulation support. For example:
diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md
index 1aecb0b013f..f97980b34c6 100644
--- a/book/cli/reth/node.md
+++ b/book/cli/reth/node.md
@@ -735,9 +735,6 @@ Engine:
       --engine.legacy-state-root
           Enable legacy state root

-      --engine.caching-and-prewarming
-          CAUTION: This CLI flag has no effect anymore, use --engine.disable-caching-and-prewarming if you want to disable caching and prewarming
-
       --engine.disable-caching-and-prewarming
           Disable cross-block caching and parallel prewarming

@@ -765,8 +762,31 @@ Engine:
           [default: 1]

-      --engine.precompile-cache
-          Enable precompile cache
+      --engine.disable-precompile-cache
+          Disable precompile cache
+
+      --engine.state-root-fallback
+          Enable state root fallback, useful for testing
+
+      --engine.always-process-payload-attributes-on-canonical-head
+          Always process payload attributes and begin a payload build process even if `forkchoiceState.headBlockHash` is already the canonical head or an ancestor. See `TreeConfig::always_process_payload_attributes_on_canonical_head` for more details.
+
+          Note: This is a no-op on OP Stack.
+
+ERA:
+      --era.enable
+          Enable import from ERA1 files
+
+      --era.path
+          The path to a directory for import.
+
+          The ERA1 files are read from the local directory, parsing headers and bodies.
+
+      --era.url
+          The URL to a remote host where the ERA1 files are hosted.
+
+          The ERA1 files are read from the remote host using HTTP GET requests, parsing headers
+          and bodies.

 Ress:
       --ress.enable
diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml
index 4d6e493fe34..39a26f49378 100644
--- a/crates/chain-state/Cargo.toml
+++ b/crates/chain-state/Cargo.toml
@@ -41,6 +41,7 @@ derive_more.workspace = true
 metrics.workspace = true
 parking_lot.workspace = true
 pin-project.workspace = true
+serde = { workspace = true, optional = true }

 # optional deps for test-utils
 alloy-signer = { workspace = true, optional = true }
@@ -56,6 +57,21 @@ alloy-consensus.workspace = true
 rand.workspace = true

 [features]
+serde = [
+    "dep:serde",
+    "alloy-consensus/serde",
+    "alloy-eips/serde",
+    "alloy-primitives/serde",
+    "parking_lot/serde",
+    "rand?/serde",
+    "reth-ethereum-primitives/serde",
+    "reth-execution-types/serde",
+    "reth-primitives-traits/serde",
+    "reth-trie/serde",
+    "revm-database/serde",
+    "revm-state?/serde",
+    "reth-storage-api/serde",
+]
 test-utils = [
    "alloy-primitives/getrandom",
    "alloy-signer",
diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs
index 14e3b02a3d8..7e8e3e7027a 100644
--- a/crates/chain-state/src/in_memory.rs
+++ b/crates/chain-state/src/in_memory.rs
@@ -798,19 +798,71 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
     }
 }

+/// Trie updates that result from calculating the state root for the block.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum ExecutedTrieUpdates {
+    /// Trie updates present. State root was calculated, and the trie updates can be applied to the
+    /// database.
+    Present(Arc<TrieUpdates>),
+    /// Trie updates missing. State root was calculated, but the trie updates cannot be applied to
+    /// the current database state. To apply the updates, the state root must be recalculated, and
+    /// new trie updates must be generated.
+    ///
+    /// This can happen when processing fork chain blocks that are building on top of the
+    /// historical database state. Since we don't store the historical trie state, we cannot
+    /// generate the trie updates for it.
+    Missing,
+}
+
+impl ExecutedTrieUpdates {
+    /// Creates an [`ExecutedTrieUpdates`] with present but empty trie updates.
+    pub fn empty() -> Self {
+        Self::Present(Arc::default())
+    }
+
+    /// Sets the trie updates to the provided value as present.
+    pub fn set_present(&mut self, updates: Arc<TrieUpdates>) {
+        *self = Self::Present(updates);
+    }
+
+    /// Takes the present trie updates, leaving the state as missing.
+    pub fn take_present(&mut self) -> Option<Arc<TrieUpdates>> {
+        match self {
+            Self::Present(updates) => {
+                let updates = core::mem::take(updates);
+                *self = Self::Missing;
+                Some(updates)
+            }
+            Self::Missing => None,
+        }
+    }
+
+    /// Returns a reference to the trie updates if present.
+    #[allow(clippy::missing_const_for_fn)] // false positive
+    pub fn as_ref(&self) -> Option<&TrieUpdates> {
+        match self {
+            Self::Present(updates) => Some(updates),
+            Self::Missing => None,
+        }
+    }
+
+    /// Returns `true` if the trie updates are present.
+    pub const fn is_present(&self) -> bool {
+        matches!(self, Self::Present(_))
+    }
+
+    /// Returns `true` if the trie updates are missing.
+    pub const fn is_missing(&self) -> bool {
+        matches!(self, Self::Missing)
+    }
+}
+
 /// An [`ExecutedBlock`] with its [`TrieUpdates`].
 ///
 /// We store it as separate type because [`TrieUpdates`] are only available for blocks stored in
 /// memory and can't be obtained for canonical persisted blocks.
 #[derive(
-    Clone,
-    Debug,
-    PartialEq,
-    Eq,
-    Default,
-    derive_more::Deref,
-    derive_more::DerefMut,
-    derive_more::Into,
+    Clone, Debug, PartialEq, Eq, derive_more::Deref, derive_more::DerefMut, derive_more::Into,
 )]
 pub struct ExecutedBlockWithTrieUpdates<N: NodePrimitives = EthPrimitives> {
     /// Inner [`ExecutedBlock`].
@@ -818,8 +870,11 @@ pub struct ExecutedBlockWithTrieUpdates<N: NodePrimitives = EthPrimitives> {
     #[deref_mut]
     #[into]
     pub block: ExecutedBlock<N>,
-    /// Trie updates that result of applying the block.
-    pub trie: Arc<TrieUpdates>,
+    /// Trie updates that result from calculating the state root for the block.
+    ///
+    /// If [`ExecutedTrieUpdates::Missing`], the trie updates should be computed when persisting
+    /// the block **on top of the canonical parent**.
+    pub trie: ExecutedTrieUpdates,
 }

 impl<N: NodePrimitives> ExecutedBlockWithTrieUpdates<N> {
@@ -828,15 +883,15 @@ impl<N: NodePrimitives> ExecutedBlockWithTrieUpdates<N> {
         recovered_block: Arc<RecoveredBlock<N::Block>>,
         execution_output: Arc<ExecutionOutcome<N::Receipt>>,
         hashed_state: Arc<HashedPostState>,
-        trie: Arc<TrieUpdates>,
+        trie: ExecutedTrieUpdates,
     ) -> Self {
         Self { block: ExecutedBlock { recovered_block, execution_output, hashed_state }, trie }
     }

-    /// Returns a reference to the trie updates for the block
+    /// Returns a reference to the trie updates for the block, if present.
     #[inline]
-    pub fn trie_updates(&self) -> &TrieUpdates {
-        &self.trie
+    pub fn trie_updates(&self) -> Option<&TrieUpdates> {
+        self.trie.as_ref()
     }

     /// Converts the value into [`SealedBlock`].
diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs
index e8f85905afb..e454b84b700 100644
--- a/crates/chain-state/src/memory_overlay.rs
+++ b/crates/chain-state/src/memory_overlay.rs
@@ -26,7 +26,7 @@ pub struct MemoryOverlayStateProviderRef<'a, N: NodePrimitives = EthPrimitives>
     /// The collection of executed parent blocks.
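The split between `Present` and `Missing` above is what lets fork blocks built on historical state defer their trie updates until persistence. A minimal standalone sketch, not reth's persistence code, with `TrieUpdates` reduced to a unit struct, of how a consumer might branch on the two variants:

```rust
use std::sync::Arc;

/// Stand-in for reth's `TrieUpdates`; the real type holds changed trie nodes.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
struct TrieUpdates;

#[derive(Debug, Clone, PartialEq, Eq)]
enum ExecutedTrieUpdates {
    Present(Arc<TrieUpdates>),
    Missing,
}

/// Hypothetical persistence step: apply cached updates when present,
/// otherwise recompute the state root on top of the canonical parent.
fn persist(trie: &ExecutedTrieUpdates) -> &'static str {
    match trie {
        ExecutedTrieUpdates::Present(_updates) => "apply cached trie updates",
        ExecutedTrieUpdates::Missing => "recalculate state root, regenerate updates",
    }
}

fn main() {
    assert_eq!(persist(&ExecutedTrieUpdates::Present(Arc::default())), "apply cached trie updates");
    assert_eq!(persist(&ExecutedTrieUpdates::Missing), "recalculate state root, regenerate updates");
}
```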
     /// Expected order is newest to oldest.
     pub(crate) in_memory: Vec<ExecutedBlockWithTrieUpdates<N>>,
     /// Lazy-loaded in-memory trie data.
-    pub(crate) trie_state: OnceLock<MemoryOverlayTrieState>,
+    pub(crate) trie_input: OnceLock<TrieInput>,
 }

 /// A state provider that stores references to in-memory blocks along with their state as well as
@@ -45,7 +45,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
         historical: Box<dyn StateProvider + 'a>,
         in_memory: Vec<ExecutedBlockWithTrieUpdates<N>>,
     ) -> Self {
-        Self { historical, in_memory, trie_state: OnceLock::new() }
+        Self { historical, in_memory, trie_input: OnceLock::new() }
     }

     /// Turn this state provider into a state provider
@@ -54,14 +54,14 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
     }

     /// Return lazy-loaded trie state aggregated from in-memory blocks.
-    fn trie_state(&self) -> &MemoryOverlayTrieState {
-        self.trie_state.get_or_init(|| {
-            let mut trie_state = MemoryOverlayTrieState::default();
-            for block in self.in_memory.iter().rev() {
-                trie_state.state.extend_ref(block.hashed_state.as_ref());
-                trie_state.nodes.extend_ref(block.trie.as_ref());
-            }
-            trie_state
+    fn trie_input(&self) -> &TrieInput {
+        self.trie_input.get_or_init(|| {
+            TrieInput::from_blocks(
+                self.in_memory
+                    .iter()
+                    .rev()
+                    .map(|block| (block.hashed_state.as_ref(), block.trie.as_ref())),
+            )
         })
     }
 }
@@ -117,8 +117,7 @@ impl<N: NodePrimitives> StateRootProvider for MemoryOverlayStateProviderRef<'_, N> {
     }

     fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
+        input.prepend_self(self.trie_input().clone());
         self.historical.state_root_from_nodes(input)
     }

@@ -133,8 +132,7 @@ impl<N: NodePrimitives> StateRootProvider for MemoryOverlayStateProviderRef<'_, N> {
         &self,
         mut input: TrieInput,
     ) -> ProviderResult<(B256, TrieUpdates)> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
+        input.prepend_self(self.trie_input().clone());
         self.historical.state_root_from_nodes_with_updates(input)
     }
 }

@@ -142,7 +140,7 @@ impl<N: NodePrimitives> StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> {
     // TODO: Currently this does not reuse available in-memory trie nodes.
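The `trie_input` accessor above is the classic lazy-initialization idiom: the aggregate is built on first use and then reused by every later provider call. A reduced sketch of the same `OnceLock::get_or_init` pattern, with the aggregation stubbed out as string concatenation purely for illustration:

```rust
use std::sync::OnceLock;

struct Overlay {
    blocks: Vec<String>,          // stand-in for in-memory blocks, newest to oldest
    aggregated: OnceLock<String>, // stand-in for the lazily built `TrieInput`
}

impl Overlay {
    fn aggregated(&self) -> &String {
        // Built at most once, no matter how many provider calls need it;
        // iterate oldest to newest, mirroring `self.in_memory.iter().rev()`.
        self.aggregated.get_or_init(|| {
            self.blocks.iter().rev().cloned().collect::<Vec<_>>().join("+")
        })
    }
}

fn main() {
    let overlay = Overlay {
        blocks: vec!["b3".into(), "b2".into(), "b1".into()],
        aggregated: OnceLock::new(),
    };
    assert_eq!(overlay.aggregated(), "b1+b2+b3");
    // The second call returns the cached value without re-aggregating.
    assert_eq!(overlay.aggregated(), "b1+b2+b3");
}
```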
     fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> {
-        let state = &self.trie_state().state;
+        let state = &self.trie_input().state;
         let mut hashed_storage =
             state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
         hashed_storage.extend(&storage);
@@ -156,7 +154,7 @@ impl<N: NodePrimitives> StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> {
         slot: B256,
         storage: HashedStorage,
     ) -> ProviderResult<StorageProof> {
-        let state = &self.trie_state().state;
+        let state = &self.trie_input().state;
         let mut hashed_storage =
             state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
         hashed_storage.extend(&storage);
@@ -170,7 +168,7 @@ impl<N: NodePrimitives> StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> {
         slots: &[B256],
         storage: HashedStorage,
     ) -> ProviderResult<StorageMultiProof> {
-        let state = &self.trie_state().state;
+        let state = &self.trie_input().state;
         let mut hashed_storage =
             state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
         hashed_storage.extend(&storage);
@@ -185,8 +183,7 @@ impl<N: NodePrimitives> StateProofProvider for MemoryOverlayStateProviderRef<'_, N> {
         address: Address,
         slots: &[B256],
     ) -> ProviderResult<AccountProof> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
+        input.prepend_self(self.trie_input().clone());
         self.historical.proof(input, address, slots)
     }

@@ -195,14 +192,12 @@ impl<N: NodePrimitives> StateProofProvider for MemoryOverlayStateProviderRef<'_, N> {
         mut input: TrieInput,
         targets: MultiProofTargets,
     ) -> ProviderResult<MultiProof> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
+        input.prepend_self(self.trie_input().clone());
         self.historical.multiproof(input, targets)
     }

     fn witness(&self, mut input: TrieInput, target: HashedPostState) -> ProviderResult<Vec<Bytes>> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
+        input.prepend_self(self.trie_input().clone());
         self.historical.witness(input, target)
     }
 }
@@ -238,12 +233,3 @@ impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
         self.historical.bytecode_by_hash(code_hash)
     }
 }
-
-/// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`].
-#[derive(Clone, Default, Debug)]
-pub(crate) struct MemoryOverlayTrieState {
-    /// The collection of aggregated in-memory trie updates.
-    pub(crate) nodes: TrieUpdates,
-    /// The collection of hashed state from in-memory blocks.
-    pub(crate) state: HashedPostState,
-}
diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs
index b03513a7520..abf2405c872 100644
--- a/crates/chain-state/src/notifications.rs
+++ b/crates/chain-state/src/notifications.rs
@@ -81,6 +81,8 @@ impl<N: NodePrimitives> Stream for CanonStateNotificationStream<N> {
 /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were
 /// reverted (e.g. during a reorg), the old chain is also returned.
 #[derive(Clone, Debug, PartialEq, Eq)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "serde", serde(bound = ""))]
 pub enum CanonStateNotification<N: NodePrimitives = EthPrimitives> {
     /// The canonical chain was extended.
Commit { @@ -357,6 +359,7 @@ mod tests { block_receipts[0].0, BlockReceipts { block: block1.num_hash(), + timestamp: block1.timestamp, tx_receipts: vec![( // Transaction hash of a Transaction::default() b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), @@ -442,6 +445,7 @@ mod tests { block_receipts[0].0, BlockReceipts { block: old_block1.num_hash(), + timestamp: old_block1.timestamp, tx_receipts: vec![( // Transaction hash of a Transaction::default() b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), @@ -458,6 +462,7 @@ mod tests { block_receipts[1].0, BlockReceipts { block: new_block1.num_hash(), + timestamp: new_block1.timestamp, tx_receipts: vec![( // Transaction hash of a Transaction::default() b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index ae0455b9c23..499a47de593 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::{ in_memory::ExecutedBlockWithTrieUpdates, CanonStateNotification, CanonStateNotifications, - CanonStateSubscriptions, + CanonStateSubscriptions, ExecutedTrieUpdates, }; use alloy_consensus::{ Header, SignableTransaction, Transaction as _, TxEip1559, TxReceipt, EMPTY_ROOT_HASH, @@ -25,7 +25,7 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::NodePrimitivesProvider; -use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; +use reth_trie::{root::state_root_unhashed, HashedPostState}; use revm_database::BundleState; use revm_state::AccountInfo; use std::{ @@ -222,7 +222,7 @@ impl TestBlockBuilder { vec![Requests::default()], )), Arc::new(HashedPostState::default()), - Arc::new(TrieUpdates::default()), + ExecutedTrieUpdates::empty(), ) } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 032fdcb2123..cb005ae6ff4 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -85,6 +85,9 @@ impl EnvironmentArgs { if config.stages.etl.dir.is_none() { config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } + if config.stages.era.folder.is_none() { + config.stages.era = config.stages.era.with_datadir(data_dir.data_dir()); + } info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage"); let (db, sfp) = match access { @@ -164,6 +167,7 @@ impl EnvironmentArgs { NoopEvmConfig::::default(), config.stages.clone(), prune_modes.clone(), + None, )) .build(factory.clone(), StaticFileProducer::new(factory.clone(), prune_modes)); diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 6eff43acd68..d821570901b 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -234,6 +234,7 @@ where evm_config, config.stages.clone(), PruneModes::default(), + None, ) .builder() .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), diff --git a/crates/cli/commands/src/import_era.rs b/crates/cli/commands/src/import_era.rs index 3d5097649cf..fbd8d23bd56 100644 --- a/crates/cli/commands/src/import_era.rs +++ b/crates/cli/commands/src/import_era.rs @@ -48,10 +48,11 @@ impl TryFromChain for ChainKind { fn try_to_url(&self) -> eyre::Result { Ok(match self { ChainKind::Named(NamedChain::Mainnet) => { - Url::parse("https://era.ithaca.xyz/era1/").expect("URL should be valid") + Url::parse("https://era.ithaca.xyz/era1/index.html").expect("URL should be valid") } 
ChainKind::Named(NamedChain::Sepolia) => { - Url::parse("https://era.ithaca.xyz/sepolia-era1/").expect("URL should be valid") + Url::parse("https://era.ithaca.xyz/sepolia-era1/index.html") + .expect("URL should be valid") } chain => return Err(eyre!("No known host for ERA files on chain {chain:?}")), }) @@ -68,13 +69,13 @@ impl> ImportEraC let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; - let hash_collector = Collector::new(config.stages.etl.file_size, config.stages.etl.dir); + let mut hash_collector = Collector::new(config.stages.etl.file_size, config.stages.etl.dir); let provider_factory = &provider_factory.provider_rw()?.0; if let Some(path) = self.import.path { - let stream = read_dir(path)?; + let stream = read_dir(path, 0)?; - era::import(stream, provider_factory, hash_collector)?; + era::import(stream, provider_factory, &mut hash_collector)?; } else { let url = match self.import.url { Some(url) => url, @@ -85,7 +86,7 @@ impl> ImportEraC let client = EraClient::new(Client::new(), url, folder); let stream = EraStream::new(client, EraStreamConfig::default()); - era::import(stream, provider_factory, hash_collector)?; + era::import(stream, provider_factory, &mut hash_collector)?; } Ok(()) diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 4f0ae05d8bc..76e7791e1d4 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,6 +1,7 @@ //! Command that initializes the node from a genesis file. use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use alloy_consensus::Header; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -102,6 +103,7 @@ impl> InitStateC &provider_rw, SealedHeader::new(header, header_hash), total_difficulty, + |number| Header { number, ..Default::default() }, )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 81b313f03e5..c839aaf268e 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -25,21 +25,26 @@ pub(crate) fn read_header_from_file(path: PathBuf) -> Result( +pub fn setup_without_evm( provider_rw: &Provider, header: SealedHeader<::BlockHeader>, total_difficulty: U256, + header_factory: F, ) -> ProviderResult<()> where - Provider: StaticFileProviderFactory> + Provider: StaticFileProviderFactory + StageCheckpointWriter + BlockWriter::Block>, + F: Fn(BlockNumber) -> ::BlockHeader + + Send + + Sync + + 'static, { info!(target: "reth::cli", new_tip = ?header.num_hash(), "Setting up dummy EVM chain before importing state."); let static_file_provider = provider_rw.static_file_provider(); // Write EVM dummy data up to `header - 1` block - append_dummy_chain(&static_file_provider, header.number() - 1)?; + append_dummy_chain(&static_file_provider, header.number() - 1, header_factory)?; info!(target: "reth::cli", "Appending first valid block."); @@ -97,10 +102,15 @@ where /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. /// * Receipts: It will not push any receipt, only increments the end block range. 
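The `header_factory` closure threaded from `setup_without_evm` above into `append_dummy_chain` (whose new signature follows) lets chains with custom header types produce their own placeholder headers, one per block number. A standalone sketch of the shape, with a toy `Header` standing in for the generic `NodePrimitives::BlockHeader`:

```rust
/// Toy header; the real code is generic over the node's block header type.
#[derive(Debug, Default, PartialEq)]
struct Header {
    number: u64,
}

/// Build placeholder headers for blocks `1..=target`, mirroring how the
/// dummy-chain writer calls the factory once per block number.
fn dummy_headers<F>(target: u64, header_factory: F) -> Vec<Header>
where
    F: Fn(u64) -> Header,
{
    (1..=target).map(header_factory).collect()
}

fn main() {
    // The same closure shape the init-state command passes in the diff above.
    let headers = dummy_headers(3, |number| Header { number, ..Default::default() });
    assert_eq!(headers.last(), Some(&Header { number: 3 }));
}
```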
-fn append_dummy_chain>( +fn append_dummy_chain( sf_provider: &StaticFileProvider, target_height: BlockNumber, -) -> ProviderResult<()> { + header_factory: F, +) -> ProviderResult<()> +where + N: NodePrimitives, + F: Fn(BlockNumber) -> N::BlockHeader + Send + Sync + 'static, +{ let (tx, rx) = std::sync::mpsc::channel(); // Spawn jobs for incrementing the block end range of transactions and receipts @@ -122,12 +132,11 @@ fn append_dummy_chain>( // Spawn job for appending empty headers let provider = sf_provider.clone(); std::thread::spawn(move || { - let mut empty_header = Header::default(); let result = provider.latest_writer(StaticFileSegment::Headers).and_then(|mut writer| { for block_num in 1..=target_height { // TODO: should we fill with real parent_hash? - empty_header.number = block_num; - writer.append_header(&empty_header, U256::ZERO, &B256::ZERO)?; + let header = header_factory(block_num); + writer.append_header(&header, U256::ZERO, &B256::ZERO)?; } Ok(()) }); diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index dbc9e678b24..7a3ac269da6 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -10,8 +10,8 @@ use reth_db::init_db; use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs, - PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs, + PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, @@ -109,6 +109,10 @@ pub struct NodeCommand> Command evm_config.clone(), stage_conf.clone(), prune_modes.clone(), + None, ) .set(ExecutionStage::new( evm_config, @@ -208,7 +209,9 @@ impl Subcommands { Self::NumBlocks { amount } => last.saturating_sub(*amount), }; if target > last { - eyre::bail!("Target block number is higher than the latest block number") + eyre::bail!( + "Target block number {target} is higher than the latest block number {last}" + ) } Ok(target) } diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index 25d42ac9022..65bca139012 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -24,6 +24,9 @@ humantime-serde = { workspace = true, optional = true } toml = { workspace = true, optional = true } eyre = { workspace = true, optional = true } +# value objects +url.workspace = true + [features] serde = [ "dep:serde", @@ -34,6 +37,7 @@ serde = [ "reth-prune-types/serde", "reth-stages-types/serde", "alloy-primitives/serde", + "url/serde", ] [dev-dependencies] diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index ad6bdb6ef04..55883d04e8d 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -6,6 +6,7 @@ use std::{ path::{Path, PathBuf}, time::Duration, }; +use url::Url; #[cfg(feature = "serde")] const EXTENSION: &str = "toml"; @@ -100,6 +101,8 @@ impl Config { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(default))] pub struct StageConfig { + /// ERA stage configuration. + pub era: EraConfig, /// Header stage configuration. pub headers: HeadersConfig, /// Body stage configuration. @@ -139,6 +142,33 @@ impl StageConfig { } } +/// ERA stage configuration. 
+#[derive(Debug, Clone, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct EraConfig { + /// Path to a local directory where ERA1 files are located. + /// + /// Conflicts with `url`. + pub path: Option, + /// The base URL of an ERA1 file host to download from. + /// + /// Conflicts with `path`. + pub url: Option, + /// Path to a directory where files downloaded from `url` will be stored until processed. + /// + /// Required for `url`. + pub folder: Option, +} + +impl EraConfig { + /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`. + pub fn with_datadir(mut self, dir: impl AsRef) -> Self { + self.folder = Some(dir.as_ref().join("era")); + self + } +} + /// Header stage configuration. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index a74c943e5e2..b3e75677b1f 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -185,7 +185,6 @@ pub fn validate_4844_header_standalone( blob_params: BlobParams, ) -> Result<(), ConsensusError> { let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; - let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; if header.parent_beacon_block_root().is_none() { return Err(ConsensusError::ParentBeaconBlockRootMissing) @@ -198,15 +197,6 @@ pub fn validate_4844_header_standalone( }) } - // `excess_blob_gas` must also be a multiple of `DATA_GAS_PER_BLOB`. This will be checked later - // (via `calc_excess_blob_gas`), but it doesn't hurt to catch the problem sooner. - if excess_blob_gas % DATA_GAS_PER_BLOB != 0 { - return Err(ConsensusError::ExcessBlobGasNotMultipleOfBlobGasPerBlob { - excess_blob_gas, - blob_gas_per_blob: DATA_GAS_PER_BLOB, - }) - } - if blob_gas_used > blob_params.max_blob_gas_per_block() { return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock { blob_gas_used, diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index aa166a07375..951c2c0ef55 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -336,17 +336,6 @@ pub enum ConsensusError { blob_gas_per_blob: u64, }, - /// Error when excess blob gas is not a multiple of blob gas per blob. - #[error( - "excess blob gas {excess_blob_gas} is not a multiple of blob gas per blob {blob_gas_per_blob}" - )] - ExcessBlobGasNotMultipleOfBlobGasPerBlob { - /// The actual excess blob gas. - excess_blob_gas: u64, - /// The blob gas per blob. - blob_gas_per_blob: u64, - }, - /// Error when the blob gas used in the header does not match the expected blob gas used. 
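Returning to the `EraConfig` introduced above: `with_datadir` only fills in the download staging directory, leaving `path` and `url` untouched. A self-contained sketch of that behavior (the `url` field is reduced to a `String` here so the example has no external dependency):

```rust
use std::path::PathBuf;

/// Reduced copy of the config shown above; only `folder` matters here.
#[derive(Debug, Default)]
struct EraConfig {
    path: Option<PathBuf>,
    url: Option<String>, // `url::Url` in the real crate; a String keeps this standalone
    folder: Option<PathBuf>,
}

impl EraConfig {
    /// Same behavior as the diff: downloads land in a directory called "era"
    /// inside the given datadir.
    fn with_datadir(mut self, dir: impl AsRef<std::path::Path>) -> Self {
        self.folder = Some(dir.as_ref().join("era"));
        self
    }
}

fn main() {
    let era = EraConfig::default().with_datadir("/var/lib/reth");
    assert_eq!(era.folder, Some(PathBuf::from("/var/lib/reth/era")));
    assert!(era.path.is_none() && era.url.is_none());
}
```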
#[error("blob gas used mismatch: {0}")] BlobGasUsedDiff(GotExpected), diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index e692dde02a1..08c5895e02a 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -109,6 +109,7 @@ pub async fn setup_engine( num_nodes: usize, chain_spec: Arc, is_dev: bool, + tree_config: reth_node_api::TreeConfig, attributes_generator: impl Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + Send + Sync + Copy + 'static, ) -> eyre::Result<( Vec>>>, @@ -156,7 +157,7 @@ where let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), builder.config().datadir(), - Default::default(), + tree_config.clone(), ); builder.launch_with(launcher) }) diff --git a/crates/e2e-test-utils/src/testsuite/actions.rs b/crates/e2e-test-utils/src/testsuite/actions.rs deleted file mode 100644 index a1fd24a584d..00000000000 --- a/crates/e2e-test-utils/src/testsuite/actions.rs +++ /dev/null @@ -1,670 +0,0 @@ -//! Actions that can be performed in tests. - -use crate::testsuite::Environment; -use alloy_primitives::{Bytes, B256, U256}; -use alloy_rpc_types_engine::{ - payload::ExecutionPayloadEnvelopeV3, ExecutionPayloadV3, ForkchoiceState, PayloadAttributes, - PayloadStatusEnum, -}; -use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; -use eyre::Result; -use futures_util::future::BoxFuture; -use reth_node_api::{EngineTypes, PayloadTypes}; -use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; -use std::{future::Future, marker::PhantomData, time::Duration}; -use tokio::time::sleep; -use tracing::debug; - -/// An action that can be performed on an instance. -/// -/// Actions execute operations and potentially make assertions in a single step. -/// The action name indicates what it does (e.g., `AssertMineBlock` would both -/// mine a block and assert it worked). -pub trait Action: Send + 'static { - /// Executes the action - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>>; -} - -/// Simplified action container for storage in tests -#[expect(missing_debug_implementations)] -pub struct ActionBox(Box>); - -impl ActionBox { - /// Constructor for [`ActionBox`]. - pub fn new>(action: A) -> Self { - Self(Box::new(action)) - } - - /// Executes an [`ActionBox`] with the given [`Environment`] reference. - pub async fn execute(mut self, env: &mut Environment) -> Result<()> { - self.0.execute(env).await - } -} - -/// Implementation of `Action` for any function/closure that takes an Environment -/// reference and returns a Future resolving to Result<()>. -/// -/// This allows using closures directly as actions with `.with_action(async move |env| {...})`. -impl Action for F -where - F: FnMut(&Environment) -> Fut + Send + 'static, - Fut: Future> + Send + 'static, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(self(env)) - } -} - -/// Mine a single block with the given transactions and verify the block was created -/// successfully. -#[derive(Debug)] -pub struct AssertMineBlock -where - Engine: PayloadTypes, -{ - /// The node index to mine - pub node_idx: usize, - /// Transactions to include in the block - pub transactions: Vec, - /// Expected block hash (optional) - pub expected_hash: Option, - /// Block's payload attributes - // TODO: refactor once we have actions to generate payload attributes. 
- pub payload_attributes: Engine::PayloadAttributes, - /// Tracks engine type - _phantom: PhantomData, -} - -impl AssertMineBlock -where - Engine: PayloadTypes, -{ - /// Create a new `AssertMineBlock` action - pub fn new( - node_idx: usize, - transactions: Vec, - expected_hash: Option, - payload_attributes: Engine::PayloadAttributes, - ) -> Self { - Self { - node_idx, - transactions, - expected_hash, - payload_attributes, - _phantom: Default::default(), - } - } -} - -impl Action for AssertMineBlock -where - Engine: EngineTypes, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - if self.node_idx >= env.node_clients.len() { - return Err(eyre::eyre!("Node index out of bounds: {}", self.node_idx)); - } - - let node_client = &env.node_clients[self.node_idx]; - let rpc_client = &node_client.rpc; - let engine_client = node_client.engine.http_client(); - - // get the latest block to use as parent - let latest_block = - EthApiClient::::block_by_number( - rpc_client, - alloy_eips::BlockNumberOrTag::Latest, - false, - ) - .await?; - - let latest_block = latest_block.ok_or_else(|| eyre::eyre!("Latest block not found"))?; - let parent_hash = latest_block.header.hash; - - debug!("Latest block hash: {parent_hash}"); - - // create a simple forkchoice state with the latest block as head - let fork_choice_state = ForkchoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash: parent_hash, - }; - - let fcu_result = EngineApiClient::::fork_choice_updated_v2( - &engine_client, - fork_choice_state, - Some(self.payload_attributes.clone()), - ) - .await?; - - debug!("FCU result: {:?}", fcu_result); - - // check if we got a valid payload ID - match fcu_result.payload_status.status { - PayloadStatusEnum::Valid => { - if let Some(payload_id) = fcu_result.payload_id { - debug!("Got payload ID: {payload_id}"); - - // get the payload that was built - let _engine_payload = - EngineApiClient::::get_payload_v2(&engine_client, payload_id) - .await?; - Ok(()) - } else { - Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) - } - } - _ => Err(eyre::eyre!("Payload status not valid: {:?}", fcu_result.payload_status)), - } - }) - } -} -/// Pick the next block producer based on the latest block information. 
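The producer-selection loop that follows rotates through the available clients with modular arithmetic: the first candidate index is derived from the next block number, and the search wraps around from there. The arithmetic in isolation, as a hypothetical helper:

```rust
/// Candidate order for the next producer, starting from
/// `(latest_block_number + 1) % num_clients` and wrapping around,
/// mirroring the loop in `PickNextBlockProducer`.
fn candidate_order(latest_block_number: u64, num_clients: usize) -> Vec<usize> {
    let start_idx = ((latest_block_number + 1) % num_clients as u64) as usize;
    (0..num_clients).map(|i| (start_idx + i) % num_clients).collect()
}

fn main() {
    // With 3 clients and latest block 5, probing starts at index (5 + 1) % 3 == 0.
    assert_eq!(candidate_order(5, 3), vec![0, 1, 2]);
    // Latest block 6 shifts the starting candidate to index 1.
    assert_eq!(candidate_order(6, 3), vec![1, 2, 0]);
}
```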
-#[derive(Debug, Default)] -pub struct PickNextBlockProducer {} - -impl PickNextBlockProducer { - /// Create a new `PickNextBlockProducer` action - pub const fn new() -> Self { - Self {} - } -} - -impl Action for PickNextBlockProducer -where - Engine: EngineTypes, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - let num_clients = env.node_clients.len(); - if num_clients == 0 { - return Err(eyre::eyre!("No node clients available")); - } - - let latest_info = env - .latest_block_info - .as_ref() - .ok_or_else(|| eyre::eyre!("No latest block information available"))?; - - // Calculate the starting index based on the latest block number - let start_idx = ((latest_info.number + 1) % num_clients as u64) as usize; - - for i in 0..num_clients { - let idx = (start_idx + i) % num_clients; - let node_client = &env.node_clients[idx]; - let rpc_client = &node_client.rpc; - - let latest_block = - EthApiClient::::block_by_number( - rpc_client, - alloy_eips::BlockNumberOrTag::Latest, - false, - ) - .await?; - - if let Some(block) = latest_block { - let block_number = block.header.number; - let block_hash = block.header.hash; - - // Check if the block hash and number match the latest block info - if block_hash == latest_info.hash && block_number == latest_info.number { - env.last_producer_idx = Some(idx); - debug!("Selected node {} as the next block producer", idx); - return Ok(()); - } - } - } - - Err(eyre::eyre!("No suitable block producer found")) - }) - } -} - -/// Store payload attributes for the next block. -#[derive(Debug, Default)] -pub struct GeneratePayloadAttributes {} - -impl Action for GeneratePayloadAttributes -where - Engine: EngineTypes, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - let latest_block = env - .latest_block_info - .as_ref() - .ok_or_else(|| eyre::eyre!("No latest block information available"))?; - let block_number = latest_block.number; - let timestamp = env.latest_header_time + env.block_timestamp_increment; - let payload_attributes = alloy_rpc_types_engine::PayloadAttributes { - timestamp, - prev_randao: B256::random(), - suggested_fee_recipient: alloy_primitives::Address::random(), - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - - env.payload_attributes.insert(latest_block.number + 1, payload_attributes); - debug!("Stored payload attributes for block {}", block_number + 1); - Ok(()) - }) - } -} -/// Action that generates the next payload -#[derive(Debug, Default)] -pub struct GenerateNextPayload {} - -impl Action for GenerateNextPayload -where - Engine: EngineTypes + PayloadTypes, - reth_node_ethereum::engine::EthPayloadAttributes: - From<::ExecutionPayloadEnvelopeV3>, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - let latest_block = env - .latest_block_info - .as_ref() - .ok_or_else(|| eyre::eyre!("No latest block information available"))?; - - let parent_hash = latest_block.hash; - debug!("Latest block hash: {parent_hash}"); - - let fork_choice_state = ForkchoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash: parent_hash, - }; - - let payload_attributes: PayloadAttributes = env - .payload_attributes - .get(&latest_block.number) - .cloned() - .ok_or_else(|| eyre::eyre!("No payload attributes found for latest block"))?; - - let fcu_result = EngineApiClient::::fork_choice_updated_v3( - 
&env.node_clients[0].engine.http_client(), - fork_choice_state, - Some(payload_attributes.clone()), - ) - .await?; - - debug!("FCU result: {:?}", fcu_result); - - let payload_id = fcu_result - .payload_id - .ok_or_else(|| eyre::eyre!("No payload ID returned from forkChoiceUpdated"))?; - - debug!("Received payload ID: {:?}", payload_id); - env.next_payload_id = Some(payload_id); - - sleep(Duration::from_secs(1)).await; - - let built_payload: PayloadAttributes = EngineApiClient::::get_payload_v3( - &env.node_clients[0].engine.http_client(), - payload_id, - ) - .await? - .into(); - env.payload_id_history.insert(latest_block.number + 1, payload_id); - env.latest_payload_built = Some(built_payload); - - Ok(()) - }) - } -} - -///Action that broadcasts the latest fork choice state to all clients -#[derive(Debug, Default)] -pub struct BroadcastLatestForkchoice {} - -impl Action for BroadcastLatestForkchoice -where - Engine: EngineTypes + PayloadTypes, - reth_node_ethereum::engine::EthPayloadAttributes: - From<::ExecutionPayloadEnvelopeV3>, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - let payload = env.latest_payload_executed.clone(); - - if env.node_clients.is_empty() { - return Err(eyre::eyre!("No node clients available")); - } - let latest_block = env - .latest_block_info - .as_ref() - .ok_or_else(|| eyre::eyre!("No latest block information available"))?; - - let parent_hash = latest_block.hash; - debug!("Latest block hash: {parent_hash}"); - - let fork_choice_state = ForkchoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash: parent_hash, - }; - debug!( - "Broadcasting forkchoice update to {} clients. Head: {:?}", - env.node_clients.len(), - fork_choice_state.head_block_hash - ); - - for (idx, client) in env.node_clients.iter().enumerate() { - match EngineApiClient::::fork_choice_updated_v3( - &client.engine.http_client(), - fork_choice_state, - payload.clone(), - ) - .await - { - Ok(resp) => { - debug!( - "Client {}: Forkchoice update status: {:?}", - idx, resp.payload_status.status - ); - } - Err(err) => { - return Err(eyre::eyre!( - "Client {}: Failed to broadcast forkchoice: {:?}", - idx, - err - )); - } - } - } - debug!("Forkchoice update broadcasted successfully"); - Ok(()) - }) - } -} - -/// Action that checks whether the broadcasted new payload has been accepted -#[derive(Debug, Default)] -pub struct CheckPayloadAccepted {} - -impl Action for CheckPayloadAccepted -where - Engine: EngineTypes - + PayloadTypes, - ExecutionPayloadEnvelopeV3: From<::ExecutionPayloadEnvelopeV3>, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - let mut accepted_check: bool = false; - - let latest_block = env - .latest_block_info - .as_mut() - .ok_or_else(|| eyre::eyre!("No latest block information available"))?; - - let payload_id = *env - .payload_id_history - .get(&(latest_block.number + 1)) - .ok_or_else(|| eyre::eyre!("Cannot find payload_id"))?; - - for (idx, client) in env.node_clients.iter().enumerate() { - let rpc_client = &client.rpc; - - // get the last header by number using latest_head_number - let rpc_latest_header = - EthApiClient::::header_by_number( - rpc_client, - alloy_eips::BlockNumberOrTag::Latest, - ) - .await? 
- .ok_or_else(|| eyre::eyre!("No latest header found from rpc"))?; - - // perform several checks - let next_new_payload = env - .latest_payload_built - .as_ref() - .ok_or_else(|| eyre::eyre!("No next built payload found"))?; - - let built_payload = EngineApiClient::::get_payload_v3( - &client.engine.http_client(), - payload_id, - ) - .await?; - - let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = built_payload; - let new_payload_block_hash = execution_payload_envelope - .execution_payload - .payload_inner - .payload_inner - .block_hash; - - if rpc_latest_header.hash != new_payload_block_hash { - debug!( - "Client {}: The hash is not matched: {:?} {:?}", - idx, rpc_latest_header.hash, new_payload_block_hash - ); - continue; - } - - if rpc_latest_header.inner.difficulty != U256::ZERO { - debug!( - "Client {}: difficulty != 0: {:?}", - idx, rpc_latest_header.inner.difficulty - ); - continue; - } - - if rpc_latest_header.inner.mix_hash != next_new_payload.prev_randao { - debug!( - "Client {}: The mix_hash and prev_randao is not same: {:?} {:?}", - idx, rpc_latest_header.inner.mix_hash, next_new_payload.prev_randao - ); - continue; - } - - let extra_len = rpc_latest_header.inner.extra_data.len(); - if extra_len <= 32 { - debug!("Client {}: extra_len is fewer than 32. extra_len: {}", idx, extra_len); - continue; - } - - // at least one client passes all the check, save the header in Env - if !accepted_check { - accepted_check = true; - // save the header in Env - env.latest_header_time = next_new_payload.timestamp; - - // add it to header history - env.latest_fork_choice_state.head_block_hash = rpc_latest_header.hash; - latest_block.hash = rpc_latest_header.hash as B256; - latest_block.number = rpc_latest_header.inner.number; - } - } - - if accepted_check { - Ok(()) - } else { - Err(eyre::eyre!("No clients passed payload acceptance checks")) - } - }) - } -} - -/// Action that produces a sequence of blocks using the available clients -#[derive(Debug)] -pub struct ProduceBlocks { - /// Number of blocks to produce - pub num_blocks: u64, - /// Tracks engine type - _phantom: PhantomData, -} - -impl ProduceBlocks { - /// Create a new `ProduceBlocks` action - pub fn new(num_blocks: u64) -> Self { - Self { num_blocks, _phantom: Default::default() } - } -} - -impl Default for ProduceBlocks { - fn default() -> Self { - Self::new(0) - } -} - -impl Action for ProduceBlocks -where - Engine: EngineTypes, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - // Create a sequence for producing a single block - let mut sequence = Sequence::new(vec![ - Box::new(PickNextBlockProducer::default()), - Box::new(GeneratePayloadAttributes::default()), - ]); - for _ in 0..self.num_blocks { - sequence.execute(env).await?; - } - Ok(()) - }) - } -} - -/// Run a sequence of actions in series. 
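`Sequence`, defined next, is the combinator that runs boxed actions one after another and short-circuits on the first error via `?`. A synchronous miniature of that control flow (actions reduced to boxed closures over a counter, since the async plumbing is incidental to the point):

```rust
// Boxed steps standing in for boxed actions; each may fail, and a failure
// aborts the whole sequence, just as `Sequence::execute` does with `?`.
type Step = Box<dyn FnMut(&mut u32) -> Result<(), String>>;

fn run_sequence(env: &mut u32, steps: &mut [Step]) -> Result<(), String> {
    for step in steps.iter_mut() {
        step(env)?; // the first error short-circuits
    }
    Ok(())
}

fn main() {
    let mut env = 0u32;
    let mut steps: Vec<Step> = vec![
        Box::new(|env| { *env += 1; Ok(()) }),
        Box::new(|env| { *env += 1; Ok(()) }),
        Box::new(|_| Err("boom".to_string())),
        Box::new(|env| { *env += 100; Ok(()) }), // never runs
    ];
    let err = run_sequence(&mut env, &mut steps).unwrap_err();
    assert_eq!(err, "boom");
    assert_eq!(env, 2); // the third step aborted the sequence
}
```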
-#[expect(missing_debug_implementations)] -pub struct Sequence { - /// Actions to execute in sequence - pub actions: Vec>>, -} - -impl Sequence { - /// Create a new sequence of actions - pub fn new(actions: Vec>>) -> Self { - Self { actions } - } -} - -impl Action for Sequence { - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - // Execute each action in sequence - for action in &mut self.actions { - action.execute(env).await?; - } - - Ok(()) - }) - } -} - -/// Action that broadcasts the next new payload -#[derive(Debug, Default)] -pub struct BroadcastNextNewPayload {} - -impl Action for BroadcastNextNewPayload -where - Engine: EngineTypes + PayloadTypes, - reth_node_ethereum::engine::EthPayloadAttributes: - From<::ExecutionPayloadEnvelopeV3>, -{ - fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { - Box::pin(async move { - // Get the next new payload to broadcast - let next_new_payload = env - .latest_payload_built - .as_ref() - .ok_or_else(|| eyre::eyre!("No next built payload found"))?; - let parent_beacon_block_root = next_new_payload - .parent_beacon_block_root - .ok_or_else(|| eyre::eyre!("No parent beacon block root for next new payload"))?; - - // Loop through all clients and broadcast the next new payload - let mut successful_broadcast: bool = false; - - for client in &env.node_clients { - let engine = client.engine.http_client(); - let rpc_client = &client.rpc; - - // Get latest block from the client - let rpc_latest_block = - EthApiClient::::block_by_number( - rpc_client, - alloy_eips::BlockNumberOrTag::Latest, - false, - ) - .await? - .ok_or_else(|| eyre::eyre!("No latest block found from rpc"))?; - - let latest_block = reth_ethereum_primitives::Block { - header: rpc_latest_block.header.inner, - body: reth_ethereum_primitives::BlockBody { - transactions: rpc_latest_block - .transactions - .into_transactions() - .map(|tx| tx.inner.into_inner().into()) - .collect(), - ommers: Default::default(), - withdrawals: rpc_latest_block.withdrawals, - }, - }; - - // Validate block number matches expected - let latest_block_info = env - .latest_block_info - .as_ref() - .ok_or_else(|| eyre::eyre!("No latest block info found"))?; - - if latest_block.header.number != latest_block_info.number { - return Err(eyre::eyre!( - "Client block number {} does not match expected block number {}", - latest_block.header.number, - latest_block_info.number - )); - } - - // Validate parent beacon block root - let latest_block_parent_beacon_block_root = - latest_block.parent_beacon_block_root.ok_or_else(|| { - eyre::eyre!("No parent beacon block root for latest block") - })?; - - if parent_beacon_block_root != latest_block_parent_beacon_block_root { - return Err(eyre::eyre!( - "Parent beacon block root mismatch: expected {:?}, got {:?}", - parent_beacon_block_root, - latest_block_parent_beacon_block_root - )); - } - - // Construct and broadcast the execution payload from the latest block - // The latest block should contain the latest_payload_built - let execution_payload = ExecutionPayloadV3::from_block_slow(&latest_block); - let result = EngineApiClient::::new_payload_v3( - &engine, - execution_payload, - vec![], - parent_beacon_block_root, - ) - .await?; - - // Check if broadcast was successful - if result.status == PayloadStatusEnum::Valid { - successful_broadcast = true; - // We don't need to update the latest payload built since it should be the same. 
- // env.latest_payload_built = Some(next_new_payload.clone()); - env.latest_payload_executed = Some(next_new_payload.clone()); - break; - } else if let PayloadStatusEnum::Invalid { validation_error } = result.status { - debug!( - "Invalid payload status returned from broadcast: {:?}", - validation_error - ); - } - } - - if !successful_broadcast { - return Err(eyre::eyre!("Failed to successfully broadcast payload to any client")); - } - - Ok(()) - }) - } -} diff --git a/crates/e2e-test-utils/src/testsuite/actions/fork.rs b/crates/e2e-test-utils/src/testsuite/actions/fork.rs new file mode 100644 index 00000000000..a0be6bdd8d0 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/fork.rs @@ -0,0 +1,278 @@ +//! Fork creation actions for the e2e testing framework. + +use crate::testsuite::{ + actions::{produce_blocks::ProduceBlocks, Sequence}, + Action, BlockInfo, Environment, +}; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::{EngineTypes, PayloadTypes}; +use reth_rpc_api::clients::EthApiClient; +use std::marker::PhantomData; +use tracing::debug; + +/// Fork base target for fork creation +#[derive(Debug, Clone)] +pub enum ForkBase { + /// Block number + Number(u64), + /// Tagged block reference + Tag(String), +} + +/// Action to create a fork from a specified block and produce blocks on top +#[derive(Debug)] +pub struct CreateFork { + /// Fork base specification (either block number or tag) + pub fork_base: ForkBase, + /// Number of blocks to produce on top of the fork base + pub num_blocks: u64, + /// Tracks engine type + _phantom: PhantomData, +} + +impl CreateFork { + /// Create a new `CreateFork` action from a block number + pub fn new(fork_base_block: u64, num_blocks: u64) -> Self { + Self { + fork_base: ForkBase::Number(fork_base_block), + num_blocks, + _phantom: Default::default(), + } + } + + /// Create a new `CreateFork` action from a tagged block + pub fn new_from_tag(tag: impl Into, num_blocks: u64) -> Self { + Self { fork_base: ForkBase::Tag(tag.into()), num_blocks, _phantom: Default::default() } + } +} + +impl Action for CreateFork +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: + Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // resolve the fork base and execute the appropriate sequence + match &self.fork_base { + ForkBase::Number(block_number) => { + // store the fork base for later validation on the active node + env.active_node_state_mut()?.current_fork_base = Some(*block_number); + + let mut sequence = Sequence::new(vec![ + Box::new(SetForkBase::new(*block_number)), + Box::new(ProduceBlocks::new(self.num_blocks)), + ]); + sequence.execute(env).await + } + ForkBase::Tag(tag) => { + let (block_info, _node_idx) = + env.block_registry.get(tag).copied().ok_or_else(|| { + eyre::eyre!("Block tag '{}' not found in registry", tag) + })?; + + // store the fork base for later validation on the active node + env.active_node_state_mut()?.current_fork_base = Some(block_info.number); + + let mut sequence = Sequence::new(vec![ + Box::new(SetForkBaseFromBlockInfo::new(block_info)), + Box::new(ProduceBlocks::new(self.num_blocks)), + ]); + sequence.execute(env).await + } + } + }) + } +} + +/// Sub-action to set the fork base block in the environment +#[derive(Debug)] +pub 
struct SetForkBase { + /// Block number to use as the base of the fork + pub fork_base_block: u64, +} + +/// Sub-action to set the fork base block from existing block info +#[derive(Debug)] +pub struct SetForkBaseFromBlockInfo { + /// Complete block info to use as the base of the fork + pub fork_base_info: BlockInfo, +} + +impl SetForkBase { + /// Create a new `SetForkBase` action + pub const fn new(fork_base_block: u64) -> Self { + Self { fork_base_block } + } +} + +impl SetForkBaseFromBlockInfo { + /// Create a new `SetForkBaseFromBlockInfo` action + pub const fn new(fork_base_info: BlockInfo) -> Self { + Self { fork_base_info } + } +} + +impl Action for SetForkBase +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + if env.node_clients.is_empty() { + return Err(eyre::eyre!("No node clients available")); + } + + // get the block at the fork base number to establish the fork point + let rpc_client = &env.node_clients[0].rpc; + let fork_base_block = + EthApiClient::::block_by_number( + rpc_client, + alloy_eips::BlockNumberOrTag::Number(self.fork_base_block), + false, + ) + .await? + .ok_or_else(|| eyre::eyre!("Fork base block {} not found", self.fork_base_block))?; + + // update active node state to point to the fork base block + let active_node_state = env.active_node_state_mut()?; + active_node_state.current_block_info = Some(BlockInfo { + hash: fork_base_block.header.hash, + number: fork_base_block.header.number, + timestamp: fork_base_block.header.timestamp, + }); + + active_node_state.latest_header_time = fork_base_block.header.timestamp; + + // update fork choice state to the fork base + active_node_state.latest_fork_choice_state = ForkchoiceState { + head_block_hash: fork_base_block.header.hash, + safe_block_hash: fork_base_block.header.hash, + finalized_block_hash: fork_base_block.header.hash, + }; + + debug!( + "Set fork base to block {} (hash: {})", + self.fork_base_block, fork_base_block.header.hash + ); + + Ok(()) + }) + } +} + +impl Action for SetForkBaseFromBlockInfo +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let block_info = self.fork_base_info; + + debug!( + "Set fork base from block info: block {} (hash: {})", + block_info.number, block_info.hash + ); + + // update active node state to point to the fork base block + let active_node_state = env.active_node_state_mut()?; + active_node_state.current_block_info = Some(block_info); + active_node_state.latest_header_time = block_info.timestamp; + + // update fork choice state to the fork base + active_node_state.latest_fork_choice_state = ForkchoiceState { + head_block_hash: block_info.hash, + safe_block_hash: block_info.hash, + finalized_block_hash: block_info.hash, + }; + + debug!("Set fork base to block {} (hash: {})", block_info.number, block_info.hash); + + Ok(()) + }) + } +} + +/// Sub-action to validate that a fork was created correctly +#[derive(Debug)] +pub struct ValidateFork { + /// Number of the fork base block (stored here since we need it for validation) + pub fork_base_number: u64, +} + +impl ValidateFork { + /// Create a new `ValidateFork` action + pub const fn new(fork_base_number: u64) -> Self { + Self { fork_base_number } + } +} + +impl Action for ValidateFork +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let 
current_block_info = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No current block information available"))?; + + // verify that the current tip is at or ahead of the fork base + if current_block_info.number < self.fork_base_number { + return Err(eyre::eyre!( + "Fork validation failed: current block number {} is behind fork base {}", + current_block_info.number, + self.fork_base_number + )); + } + + // get the fork base hash from the environment's fork choice state + // we assume the fork choice state was set correctly by SetForkBase + let fork_base_hash = + env.active_node_state()?.latest_fork_choice_state.finalized_block_hash; + + // trace back from current tip to verify it's a descendant of the fork base + let rpc_client = &env.node_clients[0].rpc; + let mut current_hash = current_block_info.hash; + let mut current_number = current_block_info.number; + + // walk backwards through the chain until we reach the fork base + while current_number > self.fork_base_number { + let block = EthApiClient::::block_by_hash( + rpc_client, + current_hash, + false, + ) + .await? + .ok_or_else(|| { + eyre::eyre!("Block with hash {} not found during fork validation", current_hash) + })?; + + current_hash = block.header.parent_hash; + current_number = block.header.number.saturating_sub(1); + } + + // verify we reached the expected fork base + if current_hash != fork_base_hash { + return Err(eyre::eyre!( + "Fork validation failed: expected fork base hash {}, but found {} at block {}", + fork_base_hash, + current_hash, + current_number + )); + } + + debug!( + "Fork validation successful: tip block {} is descendant of fork base {} ({})", + current_block_info.number, self.fork_base_number, fork_base_hash + ); + + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/actions/mod.rs b/crates/e2e-test-utils/src/testsuite/actions/mod.rs new file mode 100644 index 00000000000..7f09c283568 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/mod.rs @@ -0,0 +1,271 @@ +//! Actions that can be performed in tests. + +use crate::testsuite::Environment; +use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatusEnum}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::EngineTypes; +use std::future::Future; +use tracing::debug; + +pub mod fork; +pub mod node_ops; +pub mod produce_blocks; +pub mod reorg; + +pub use fork::{CreateFork, ForkBase, SetForkBase, SetForkBaseFromBlockInfo, ValidateFork}; +pub use node_ops::{CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag}; +pub use produce_blocks::{ + AssertMineBlock, BroadcastLatestForkchoice, BroadcastNextNewPayload, CheckPayloadAccepted, + ExpectFcuStatus, GenerateNextPayload, GeneratePayloadAttributes, PickNextBlockProducer, + ProduceBlocks, ProduceInvalidBlocks, TestFcuToTag, UpdateBlockInfo, + UpdateBlockInfoToLatestPayload, ValidateCanonicalTag, +}; +pub use reorg::{ReorgTarget, ReorgTo, SetReorgTarget}; + +/// An action that can be performed on an instance. +/// +/// Actions execute operations and potentially make assertions in a single step. +/// The action name indicates what it does (e.g., `AssertMineBlock` would both +/// mine a block and assert it worked). 
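The generic `Action` trait that follows gets a blanket implementation for closures, which is what makes `.with_action(async move |env| { ... })` possible. A reduced, engine-free model of that blanket impl (`Environment` shrunk to a plain struct, `Send` bounds dropped, and the future returned boxed exactly as in the trait):

```rust
use std::{future::Future, pin::Pin};

/// Stand-in for the test Environment; just enough state to mutate.
struct Environment {
    block_count: u64,
}

type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;

trait Action {
    fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<(), String>>;
}

// Blanket impl: any closure that takes the environment and returns a
// future is itself an action.
impl<F, Fut> Action for F
where
    F: FnMut(&mut Environment) -> Fut,
    Fut: Future<Output = Result<(), String>> + 'static,
{
    fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<(), String>> {
        Box::pin(self(env))
    }
}

fn main() {
    let mut env = Environment { block_count: 0 };
    // The closure mutates synchronously and returns an owned future,
    // so the future itself borrows nothing from `env`.
    let mut bump = |env: &mut Environment| {
        env.block_count += 1;
        async { Ok::<(), String>(()) }
    };
    let fut = bump.execute(&mut env);
    drop(fut); // a real harness would .await this inside an executor
    assert_eq!(env.block_count, 1);
}
```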
+pub trait Action<I>: Send + 'static
+where
+    I: EngineTypes,
+{
+    /// Executes the action
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<I>) -> BoxFuture<'a, Result<()>>;
+}
+
+/// Simplified action container for storage in tests
+#[expect(missing_debug_implementations)]
+pub struct ActionBox<I>(Box<dyn Action<I>>);
+
+impl<I> ActionBox<I>
+where
+    I: EngineTypes + 'static,
+{
+    /// Constructor for [`ActionBox`].
+    pub fn new<A: Action<I>>(action: A) -> Self {
+        Self(Box::new(action))
+    }
+
+    /// Executes an [`ActionBox`] with the given [`Environment`] reference.
+    pub async fn execute(mut self, env: &mut Environment<I>) -> Result<()> {
+        self.0.execute(env).await
+    }
+}
+
+/// Implementation of `Action` for any function/closure that takes an Environment
+/// reference and returns a Future resolving to Result<()>.
+///
+/// This allows using closures directly as actions with `.with_action(async move |env| {...})`.
+impl<I, F, Fut> Action<I> for F
+where
+    I: EngineTypes,
+    F: FnMut(&Environment<I>) -> Fut + Send + 'static,
+    Fut: Future<Output = Result<()>> + Send + 'static,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<I>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(self(env))
+    }
+}
+
+/// Run a sequence of actions in series.
+#[expect(missing_debug_implementations)]
+pub struct Sequence<I> {
+    /// Actions to execute in sequence
+    pub actions: Vec<Box<dyn Action<I>>>,
+}
+
+impl<I> Sequence<I> {
+    /// Create a new sequence of actions
+    pub fn new(actions: Vec<Box<dyn Action<I>>>) -> Self {
+        Self { actions }
+    }
+}
+
+impl<I> Action<I> for Sequence<I>
+where
+    I: EngineTypes + Sync + Send + 'static,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<I>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            // Execute each action in sequence
+            for action in &mut self.actions {
+                action.execute(env).await?;
+            }
+
+            Ok(())
+        })
+    }
+}
+
+/// Action that makes the current latest block canonical by broadcasting a forkchoice update
+#[derive(Debug, Default)]
+pub struct MakeCanonical {}
+
+impl MakeCanonical {
+    /// Create a new `MakeCanonical` action
+    pub const fn new() -> Self {
+        Self {}
+    }
+}
+
+impl<Engine> Action<Engine> for MakeCanonical
+where
+    Engine: EngineTypes + reth_node_api::PayloadTypes,
+    Engine::PayloadAttributes: From<alloy_rpc_types_engine::PayloadAttributes> + Clone,
+    Engine::ExecutionPayloadEnvelopeV3:
+        Into<alloy_rpc_types_engine::payload::ExecutionPayloadEnvelopeV3>,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            let mut actions: Vec<Box<dyn Action<Engine>>> = vec![
+                Box::new(BroadcastLatestForkchoice::default()),
+                Box::new(UpdateBlockInfo::default()),
+            ];
+
+            // if we're on a fork, validate it now that it's canonical
+            if let Ok(active_state) = env.active_node_state() {
+                if let Some(fork_base) = active_state.current_fork_base {
+                    debug!("MakeCanonical: Adding fork validation from base block {}", fork_base);
+                    actions.push(Box::new(ValidateFork::new(fork_base)));
+                    // clear the fork base since we're now canonical
+                    env.active_node_state_mut()?.current_fork_base = None;
+                }
+            }
+
+            let mut sequence = Sequence::new(actions);
+            sequence.execute(env).await
+        })
+    }
+}
+
+/// Action that captures the current block and tags it with a name for later reference
+#[derive(Debug)]
+pub struct CaptureBlock {
+    /// Tag name to associate with the current block
+    pub tag: String,
+}
+
+impl CaptureBlock {
+    /// Create a new `CaptureBlock` action
+    pub fn new(tag: impl Into<String>) -> Self {
+        Self { tag: tag.into() }
+    }
+}
+
+impl<Engine> Action<Engine> for CaptureBlock
+where
+    Engine: EngineTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            let current_block = env
+                .current_block_info()
+                .ok_or_else(||
eyre::eyre!("No current block information available"))?; + + env.block_registry.insert(self.tag.clone(), (current_block, env.active_node_idx)); + + debug!( + "Captured block {} (hash: {}) from active node {} with tag '{}'", + current_block.number, current_block.hash, env.active_node_idx, self.tag + ); + + Ok(()) + }) + } +} + +/// Validates a forkchoice update response and returns an error if invalid +pub fn validate_fcu_response(response: &ForkchoiceUpdated, context: &str) -> Result<()> { + match &response.payload_status.status { + PayloadStatusEnum::Valid => { + debug!("{}: FCU accepted as valid", context); + Ok(()) + } + PayloadStatusEnum::Invalid { validation_error } => { + Err(eyre::eyre!("{}: FCU rejected as invalid: {:?}", context, validation_error)) + } + PayloadStatusEnum::Syncing => { + debug!("{}: FCU accepted, node is syncing", context); + Ok(()) + } + PayloadStatusEnum::Accepted => { + debug!("{}: FCU accepted for processing", context); + Ok(()) + } + } +} + +/// Expects that the `ForkchoiceUpdated` response status is VALID. +pub fn expect_fcu_valid(response: &ForkchoiceUpdated, context: &str) -> Result<()> { + match &response.payload_status.status { + PayloadStatusEnum::Valid => { + debug!("{}: FCU status is VALID as expected.", context); + Ok(()) + } + other_status => { + Err(eyre::eyre!("{}: Expected FCU status VALID, but got {:?}", context, other_status)) + } + } +} + +/// Expects that the `ForkchoiceUpdated` response status is INVALID. +pub fn expect_fcu_invalid(response: &ForkchoiceUpdated, context: &str) -> Result<()> { + match &response.payload_status.status { + PayloadStatusEnum::Invalid { validation_error } => { + debug!("{}: FCU status is INVALID as expected: {:?}", context, validation_error); + Ok(()) + } + other_status => { + Err(eyre::eyre!("{}: Expected FCU status INVALID, but got {:?}", context, other_status)) + } + } +} + +/// Expects that the `ForkchoiceUpdated` response status is either SYNCING or ACCEPTED. +pub fn expect_fcu_syncing_or_accepted(response: &ForkchoiceUpdated, context: &str) -> Result<()> { + match &response.payload_status.status { + PayloadStatusEnum::Syncing => { + debug!("{}: FCU status is SYNCING as expected (SYNCING or ACCEPTED).", context); + Ok(()) + } + PayloadStatusEnum::Accepted => { + debug!("{}: FCU status is ACCEPTED as expected (SYNCING or ACCEPTED).", context); + Ok(()) + } + other_status => Err(eyre::eyre!( + "{}: Expected FCU status SYNCING or ACCEPTED, but got {:?}", + context, + other_status + )), + } +} + +/// Expects that the `ForkchoiceUpdated` response status is not SYNCING and not ACCEPTED. 
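// Usage sketch for the FCU helpers above (hypothetical wrapper, not in the
// patch): `validate_fcu_response` is the lenient check used during block
// production, while the `expect_fcu_*` family pins an exact status.
fn assert_canonical_head(resp: &ForkchoiceUpdated) -> Result<()> {
    // lenient: passes for VALID, SYNCING and ACCEPTED, fails only on INVALID
    validate_fcu_response(resp, "assert_canonical_head")?;
    // strict: passes only when the engine reports VALID
    expect_fcu_valid(resp, "assert_canonical_head")
}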
+pub fn expect_fcu_not_syncing_or_accepted( + response: &ForkchoiceUpdated, + context: &str, +) -> Result<()> { + match &response.payload_status.status { + PayloadStatusEnum::Valid => { + debug!("{}: FCU status is VALID as expected (not SYNCING or ACCEPTED).", context); + Ok(()) + } + PayloadStatusEnum::Invalid { validation_error } => { + debug!( + "{}: FCU status is INVALID as expected (not SYNCING or ACCEPTED): {:?}", + context, validation_error + ); + Ok(()) + } + syncing_or_accepted_status @ (PayloadStatusEnum::Syncing | PayloadStatusEnum::Accepted) => { + Err(eyre::eyre!( + "{}: Expected FCU status not SYNCING or ACCEPTED (i.e., VALID or INVALID), but got {:?}", + context, + syncing_or_accepted_status + )) + } + } +} diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs new file mode 100644 index 00000000000..3a240d8f644 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs @@ -0,0 +1,215 @@ +//! Node-specific operations for multi-node testing. + +use crate::testsuite::{Action, Environment}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::EngineTypes; +use reth_rpc_api::clients::EthApiClient; +use tracing::debug; + +/// Action to select which node should be active for subsequent single-node operations. +#[derive(Debug)] +pub struct SelectActiveNode { + /// Node index to set as active + pub node_idx: usize, +} + +impl SelectActiveNode { + /// Create a new `SelectActiveNode` action + pub const fn new(node_idx: usize) -> Self { + Self { node_idx } + } +} + +impl Action for SelectActiveNode +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + env.set_active_node(self.node_idx)?; + debug!("Set active node to {}", self.node_idx); + Ok(()) + }) + } +} + +/// Action to compare chain tips between two nodes. +#[derive(Debug)] +pub struct CompareNodeChainTips { + /// First node index + pub node_a: usize, + /// Second node index + pub node_b: usize, + /// Whether tips should be the same or different + pub should_be_equal: bool, +} + +impl CompareNodeChainTips { + /// Create a new action expecting nodes to have the same chain tip + pub const fn expect_same(node_a: usize, node_b: usize) -> Self { + Self { node_a, node_b, should_be_equal: true } + } + + /// Create a new action expecting nodes to have different chain tips + pub const fn expect_different(node_a: usize, node_b: usize) -> Self { + Self { node_a, node_b, should_be_equal: false } + } +} + +impl Action for CompareNodeChainTips +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + if self.node_a >= env.node_count() || self.node_b >= env.node_count() { + return Err(eyre::eyre!("Node index out of bounds")); + } + + let node_a_client = &env.node_clients[self.node_a]; + let node_b_client = &env.node_clients[self.node_b]; + + // Get latest block from each node + let block_a = EthApiClient::::block_by_number( + &node_a_client.rpc, + alloy_eips::BlockNumberOrTag::Latest, + false, + ) + .await? + .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_a))?; + + let block_b = EthApiClient::::block_by_number( + &node_b_client.rpc, + alloy_eips::BlockNumberOrTag::Latest, + false, + ) + .await? 
+ .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_b))?; + + let tips_equal = block_a.header.hash == block_b.header.hash; + + debug!( + "Node {} chain tip: {} (block {}), Node {} chain tip: {} (block {})", + self.node_a, + block_a.header.hash, + block_a.header.number, + self.node_b, + block_b.header.hash, + block_b.header.number + ); + + if self.should_be_equal && !tips_equal { + return Err(eyre::eyre!( + "Expected nodes {} and {} to have the same chain tip, but node {} has {} and node {} has {}", + self.node_a, self.node_b, self.node_a, block_a.header.hash, self.node_b, block_b.header.hash + )); + } + + if !self.should_be_equal && tips_equal { + return Err(eyre::eyre!( + "Expected nodes {} and {} to have different chain tips, but both have {}", + self.node_a, + self.node_b, + block_a.header.hash + )); + } + + Ok(()) + }) + } +} + +/// Action to capture a block with a tag, associating it with a specific node. +#[derive(Debug)] +pub struct CaptureBlockOnNode { + /// Tag name to associate with the block + pub tag: String, + /// Node index to capture the block from + pub node_idx: usize, +} + +impl CaptureBlockOnNode { + /// Create a new `CaptureBlockOnNode` action + pub fn new(tag: impl Into, node_idx: usize) -> Self { + Self { tag: tag.into(), node_idx } + } +} + +impl Action for CaptureBlockOnNode +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let node_state = env.node_state(self.node_idx)?; + let current_block = node_state.current_block_info.ok_or_else(|| { + eyre::eyre!("No current block information available for node {}", self.node_idx) + })?; + + env.block_registry.insert(self.tag.clone(), (current_block, self.node_idx)); + + debug!( + "Captured block {} (hash: {}) from node {} with tag '{}'", + current_block.number, current_block.hash, self.node_idx, self.tag + ); + + Ok(()) + }) + } +} + +/// Action to get a block by tag and verify which node it came from. 
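// Sketch of how these node ops compose into a test plan (hypothetical helper;
// `EthEngineTypes` is assumed from reth-node-ethereum, as used in the examples
// module further below):
fn cross_node_checks() -> Vec<Box<dyn Action<EthEngineTypes>>> {
    vec![
        // route subsequent single-node actions to node 0
        Box::new(SelectActiveNode::new(0)),
        // remember node 0's tip under a tag for later lookups
        Box::new(CaptureBlockOnNode::new("node0_tip", 0)),
        // assert node 1 converged to the same tip
        Box::new(CompareNodeChainTips::expect_same(0, 1)),
    ]
}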
+#[derive(Debug)] +pub struct ValidateBlockTag { + /// Tag to look up + pub tag: String, + /// Expected node index (optional) + pub expected_node_idx: Option, +} + +impl ValidateBlockTag { + /// Create a new action to validate a block tag exists + pub fn exists(tag: impl Into) -> Self { + Self { tag: tag.into(), expected_node_idx: None } + } + + /// Create a new action to validate a block tag came from a specific node + pub fn from_node(tag: impl Into, node_idx: usize) -> Self { + Self { tag: tag.into(), expected_node_idx: Some(node_idx) } + } +} + +impl Action for ValidateBlockTag +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let (block_info, node_idx) = env + .block_registry + .get(&self.tag) + .copied() + .ok_or_else(|| eyre::eyre!("Block tag '{}' not found in registry", self.tag))?; + + if let Some(expected_node) = self.expected_node_idx { + if node_idx != expected_node { + return Err(eyre::eyre!( + "Block tag '{}' came from node {} but expected node {}", + self.tag, + node_idx, + expected_node + )); + } + } + + debug!( + "Validated block tag '{}': block {} (hash: {}) from node {}", + self.tag, block_info.number, block_info.hash, node_idx + ); + + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs new file mode 100644 index 00000000000..02f7155b66a --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -0,0 +1,991 @@ +//! Block production actions for the e2e testing framework. + +use crate::testsuite::{ + actions::{validate_fcu_response, Action, Sequence}, + BlockInfo, Environment, +}; +use alloy_primitives::{Bytes, B256}; +use alloy_rpc_types_engine::{ + payload::ExecutionPayloadEnvelopeV3, ForkchoiceState, PayloadAttributes, PayloadStatusEnum, +}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::{EngineTypes, PayloadTypes}; +use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; +use std::{collections::HashSet, marker::PhantomData, time::Duration}; +use tokio::time::sleep; +use tracing::debug; + +/// Mine a single block with the given transactions and verify the block was created +/// successfully. +#[derive(Debug)] +pub struct AssertMineBlock +where + Engine: PayloadTypes, +{ + /// The node index to mine + pub node_idx: usize, + /// Transactions to include in the block + pub transactions: Vec, + /// Expected block hash (optional) + pub expected_hash: Option, + /// Block's payload attributes + // TODO: refactor once we have actions to generate payload attributes. 
+ pub payload_attributes: Engine::PayloadAttributes, + /// Tracks engine type + _phantom: PhantomData, +} + +impl AssertMineBlock +where + Engine: PayloadTypes, +{ + /// Create a new `AssertMineBlock` action + pub fn new( + node_idx: usize, + transactions: Vec, + expected_hash: Option, + payload_attributes: Engine::PayloadAttributes, + ) -> Self { + Self { + node_idx, + transactions, + expected_hash, + payload_attributes, + _phantom: Default::default(), + } + } +} + +impl Action for AssertMineBlock +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + if self.node_idx >= env.node_clients.len() { + return Err(eyre::eyre!("Node index out of bounds: {}", self.node_idx)); + } + + let node_client = &env.node_clients[self.node_idx]; + let rpc_client = &node_client.rpc; + let engine_client = node_client.engine.http_client(); + + // get the latest block to use as parent + let latest_block = + EthApiClient::::block_by_number( + rpc_client, + alloy_eips::BlockNumberOrTag::Latest, + false, + ) + .await?; + + let latest_block = latest_block.ok_or_else(|| eyre::eyre!("Latest block not found"))?; + let parent_hash = latest_block.header.hash; + + debug!("Latest block hash: {parent_hash}"); + + // create a simple forkchoice state with the latest block as head + let fork_choice_state = ForkchoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + + let fcu_result = EngineApiClient::::fork_choice_updated_v2( + &engine_client, + fork_choice_state, + Some(self.payload_attributes.clone()), + ) + .await?; + + debug!("FCU result: {:?}", fcu_result); + + // check if we got a valid payload ID + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!("Got payload ID: {payload_id}"); + + // get the payload that was built + let _engine_payload = + EngineApiClient::::get_payload_v2(&engine_client, payload_id) + .await?; + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!("Payload status not valid: {:?}", fcu_result.payload_status)), + } + }) + } +} + +/// Pick the next block producer based on the latest block information. +#[derive(Debug, Default)] +pub struct PickNextBlockProducer {} + +impl PickNextBlockProducer { + /// Create a new `PickNextBlockProducer` action + pub const fn new() -> Self { + Self {} + } +} + +impl Action for PickNextBlockProducer +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let num_clients = env.node_clients.len(); + if num_clients == 0 { + return Err(eyre::eyre!("No node clients available")); + } + + let latest_info = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No latest block information available"))?; + + // simple round-robin selection based on next block number + let next_producer_idx = ((latest_info.number + 1) % num_clients as u64) as usize; + + env.last_producer_idx = Some(next_producer_idx); + debug!( + "Selected node {} as the next block producer for block {}", + next_producer_idx, + latest_info.number + 1 + ); + + Ok(()) + }) + } +} + +/// Store payload attributes for the next block. 
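// The selection in `PickNextBlockProducer` above is plain round-robin over
// the connected clients; the same rule as a standalone sketch:
fn producer_for_block(next_block_number: u64, num_clients: usize) -> usize {
    (next_block_number % num_clients as u64) as usize
}
// e.g. with 3 clients, blocks 1, 2, 3, 4 are produced by nodes 1, 2, 0, 1.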
+#[derive(Debug, Default)] +pub struct GeneratePayloadAttributes {} + +impl Action for GeneratePayloadAttributes +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let latest_block = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No latest block information available"))?; + let block_number = latest_block.number; + let timestamp = + env.active_node_state()?.latest_header_time + env.block_timestamp_increment; + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao: B256::random(), + suggested_fee_recipient: alloy_primitives::Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + env.active_node_state_mut()? + .payload_attributes + .insert(latest_block.number + 1, payload_attributes); + debug!("Stored payload attributes for block {}", block_number + 1); + Ok(()) + }) + } +} + +/// Action that generates the next payload +#[derive(Debug, Default)] +pub struct GenerateNextPayload {} + +impl Action for GenerateNextPayload +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let latest_block = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No latest block information available"))?; + + let parent_hash = latest_block.hash; + debug!("Latest block hash: {parent_hash}"); + + let fork_choice_state = ForkchoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash: parent_hash, + }; + + let payload_attributes = env + .active_node_state()? + .payload_attributes + .get(&(latest_block.number + 1)) + .cloned() + .ok_or_else(|| eyre::eyre!("No payload attributes found for next block"))?; + + let producer_idx = + env.last_producer_idx.ok_or_else(|| eyre::eyre!("No block producer selected"))?; + + let fcu_result = EngineApiClient::::fork_choice_updated_v3( + &env.node_clients[producer_idx].engine.http_client(), + fork_choice_state, + Some(payload_attributes.clone().into()), + ) + .await?; + + debug!("FCU result: {:?}", fcu_result); + + // validate the FCU status before proceeding + validate_fcu_response(&fcu_result, "GenerateNextPayload")?; + + let payload_id = if let Some(payload_id) = fcu_result.payload_id { + debug!("Received new payload ID: {:?}", payload_id); + payload_id + } else { + debug!("No payload ID returned, generating fresh payload attributes for forking"); + + let fresh_payload_attributes = PayloadAttributes { + timestamp: env.active_node_state()?.latest_header_time + + env.block_timestamp_increment, + prev_randao: B256::random(), + suggested_fee_recipient: alloy_primitives::Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fresh_fcu_result = EngineApiClient::::fork_choice_updated_v3( + &env.node_clients[producer_idx].engine.http_client(), + fork_choice_state, + Some(fresh_payload_attributes.clone().into()), + ) + .await?; + + debug!("Fresh FCU result: {:?}", fresh_fcu_result); + + // validate the fresh FCU status + validate_fcu_response(&fresh_fcu_result, "GenerateNextPayload (fresh)")?; + + if let Some(payload_id) = fresh_fcu_result.payload_id { + payload_id + } else { + debug!("Engine considers the fork base already canonical, skipping payload generation"); + return Ok(()); + } + }; + + 
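            // at this point `payload_id` came either from the first
            // forkchoiceUpdated call or from the retry with fresh attributes;
            // if even the retry produced no ID, the action returned early above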
env.active_node_state_mut()?.next_payload_id = Some(payload_id); + + sleep(Duration::from_secs(1)).await; + + let built_payload_envelope = EngineApiClient::::get_payload_v3( + &env.node_clients[producer_idx].engine.http_client(), + payload_id, + ) + .await?; + + // Store the payload attributes that were used to generate this payload + let built_payload = payload_attributes.clone(); + env.active_node_state_mut()? + .payload_id_history + .insert(latest_block.number + 1, payload_id); + env.active_node_state_mut()?.latest_payload_built = Some(built_payload); + env.active_node_state_mut()?.latest_payload_envelope = Some(built_payload_envelope); + + Ok(()) + }) + } +} + +/// Action that broadcasts the latest fork choice state to all clients +#[derive(Debug, Default)] +pub struct BroadcastLatestForkchoice {} + +impl Action for BroadcastLatestForkchoice +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + if env.node_clients.is_empty() { + return Err(eyre::eyre!("No node clients available")); + } + + // use the hash of the newly executed payload if available + let head_hash = if let Some(payload_envelope) = + &env.active_node_state()?.latest_payload_envelope + { + let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = + payload_envelope.clone().into(); + let new_block_hash = execution_payload_envelope + .execution_payload + .payload_inner + .payload_inner + .block_hash; + debug!("Using newly executed block hash as head: {new_block_hash}"); + new_block_hash + } else { + // fallback to RPC query + let rpc_client = &env.node_clients[0].rpc; + let current_head_block = + EthApiClient::::block_by_number( + rpc_client, + alloy_eips::BlockNumberOrTag::Latest, + false, + ) + .await? + .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?; + debug!("Using RPC latest block hash as head: {}", current_head_block.header.hash); + current_head_block.header.hash + }; + + let fork_choice_state = ForkchoiceState { + head_block_hash: head_hash, + safe_block_hash: head_hash, + finalized_block_hash: head_hash, + }; + debug!( + "Broadcasting forkchoice update to {} clients. Head: {:?}", + env.node_clients.len(), + fork_choice_state.head_block_hash + ); + + for (idx, client) in env.node_clients.iter().enumerate() { + match EngineApiClient::::fork_choice_updated_v3( + &client.engine.http_client(), + fork_choice_state, + None, + ) + .await + { + Ok(resp) => { + debug!( + "Client {}: Forkchoice update status: {:?}", + idx, resp.payload_status.status + ); + // validate that the forkchoice update was accepted + validate_fcu_response(&resp, &format!("Client {idx}"))?; + } + Err(err) => { + return Err(eyre::eyre!( + "Client {}: Failed to broadcast forkchoice: {:?}", + idx, + err + )); + } + } + } + debug!("Forkchoice update broadcasted successfully"); + Ok(()) + }) + } +} + +/// Action that syncs environment state with the node's canonical chain via RPC. +/// +/// This queries the latest canonical block from the node and updates the environment +/// to match. Typically used after forkchoice operations to ensure the environment +/// is in sync with the node's view of the canonical chain. 
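// `BroadcastLatestForkchoice` above pins head, safe and finalized to the same
// hash; a minimal sketch of that construction (illustrative helper only):
fn forkchoice_all_at(head: B256) -> ForkchoiceState {
    ForkchoiceState {
        head_block_hash: head,
        safe_block_hash: head,
        finalized_block_hash: head,
    }
}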
+#[derive(Debug, Default)] +pub struct UpdateBlockInfo {} + +impl Action for UpdateBlockInfo +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // get the latest block from the first client to update environment state + let rpc_client = &env.node_clients[0].rpc; + let latest_block = + EthApiClient::::block_by_number( + rpc_client, + alloy_eips::BlockNumberOrTag::Latest, + false, + ) + .await? + .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?; + + // update environment with the new block information + env.set_current_block_info(BlockInfo { + hash: latest_block.header.hash, + number: latest_block.header.number, + timestamp: latest_block.header.timestamp, + })?; + + env.active_node_state_mut()?.latest_header_time = latest_block.header.timestamp; + env.active_node_state_mut()?.latest_fork_choice_state.head_block_hash = + latest_block.header.hash; + + debug!( + "Updated environment to block {} (hash: {})", + latest_block.header.number, latest_block.header.hash + ); + + Ok(()) + }) + } +} + +/// Action that updates environment state using the locally produced payload. +/// +/// This uses the execution payload stored in the environment rather than querying RPC, +/// making it more efficient and reliable during block production. Preferred over +/// `UpdateBlockInfo` when we have just produced a block and have the payload available. +#[derive(Debug, Default)] +pub struct UpdateBlockInfoToLatestPayload {} + +impl Action for UpdateBlockInfoToLatestPayload +where + Engine: EngineTypes + PayloadTypes, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let payload_envelope = env + .active_node_state()? + .latest_payload_envelope + .as_ref() + .ok_or_else(|| eyre::eyre!("No execution payload envelope available"))?; + + let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = + payload_envelope.clone().into(); + let execution_payload = execution_payload_envelope.execution_payload; + + let block_hash = execution_payload.payload_inner.payload_inner.block_hash; + let block_number = execution_payload.payload_inner.payload_inner.block_number; + let block_timestamp = execution_payload.payload_inner.payload_inner.timestamp; + + // update environment with the new block information from the payload + env.set_current_block_info(BlockInfo { + hash: block_hash, + number: block_number, + timestamp: block_timestamp, + })?; + + env.active_node_state_mut()?.latest_header_time = block_timestamp; + env.active_node_state_mut()?.latest_fork_choice_state.head_block_hash = block_hash; + + debug!( + "Updated environment to newly produced block {} (hash: {})", + block_number, block_hash + ); + + Ok(()) + }) + } +} + +/// Action that checks whether the broadcasted new payload has been accepted +#[derive(Debug, Default)] +pub struct CheckPayloadAccepted {} + +impl Action for CheckPayloadAccepted +where + Engine: EngineTypes, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let mut accepted_check: bool = false; + + let mut latest_block = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No latest block information available"))?; + + let payload_id = *env + .active_node_state()? 
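                // (the payload ID was recorded under the next block height
                //  when the payload was built, hence the lookup below)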
+ .payload_id_history + .get(&(latest_block.number + 1)) + .ok_or_else(|| eyre::eyre!("Cannot find payload_id"))?; + + let node_clients = env.node_clients.clone(); + for (idx, client) in node_clients.iter().enumerate() { + let rpc_client = &client.rpc; + + // get the last header by number using latest_head_number + let rpc_latest_header = + EthApiClient::::header_by_number( + rpc_client, + alloy_eips::BlockNumberOrTag::Latest, + ) + .await? + .ok_or_else(|| eyre::eyre!("No latest header found from rpc"))?; + + // perform several checks + let next_new_payload = env + .active_node_state()? + .latest_payload_built + .as_ref() + .ok_or_else(|| eyre::eyre!("No next built payload found"))?; + + let built_payload = EngineApiClient::::get_payload_v3( + &client.engine.http_client(), + payload_id, + ) + .await?; + + let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = built_payload.into(); + let new_payload_block_hash = execution_payload_envelope + .execution_payload + .payload_inner + .payload_inner + .block_hash; + + if rpc_latest_header.hash != new_payload_block_hash { + debug!( + "Client {}: The hash is not matched: {:?} {:?}", + idx, rpc_latest_header.hash, new_payload_block_hash + ); + continue; + } + + if rpc_latest_header.inner.difficulty != alloy_primitives::U256::ZERO { + debug!( + "Client {}: difficulty != 0: {:?}", + idx, rpc_latest_header.inner.difficulty + ); + continue; + } + + if rpc_latest_header.inner.mix_hash != next_new_payload.prev_randao { + debug!( + "Client {}: The mix_hash and prev_randao is not same: {:?} {:?}", + idx, rpc_latest_header.inner.mix_hash, next_new_payload.prev_randao + ); + continue; + } + + let extra_len = rpc_latest_header.inner.extra_data.len(); + if extra_len <= 32 { + debug!("Client {}: extra_len is fewer than 32. extra_len: {}", idx, extra_len); + continue; + } + + // at least one client passes all the check, save the header in Env + if !accepted_check { + accepted_check = true; + // save the header in Env + env.active_node_state_mut()?.latest_header_time = next_new_payload.timestamp; + + // add it to header history + env.active_node_state_mut()?.latest_fork_choice_state.head_block_hash = + rpc_latest_header.hash; + latest_block.hash = rpc_latest_header.hash; + latest_block.number = rpc_latest_header.inner.number; + } + } + + if accepted_check { + Ok(()) + } else { + Err(eyre::eyre!("No clients passed payload acceptance checks")) + } + }) + } +} + +/// Action that broadcasts the next new payload +#[derive(Debug, Default)] +pub struct BroadcastNextNewPayload {} + +impl Action for BroadcastNextNewPayload +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // Get the next new payload to broadcast + let next_new_payload = env + .active_node_state()? + .latest_payload_built + .as_ref() + .ok_or_else(|| eyre::eyre!("No next built payload found"))? + .clone(); + let parent_beacon_block_root = next_new_payload + .parent_beacon_block_root + .ok_or_else(|| eyre::eyre!("No parent beacon block root for next new payload"))?; + + let payload_envelope = env + .active_node_state()? + .latest_payload_envelope + .as_ref() + .ok_or_else(|| eyre::eyre!("No execution payload envelope available"))? 
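                // (cloned so the shared borrow of `env` ends here; `env` is
                //  mutably re-borrowed after the broadcast loop to record the
                //  executed payload)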
+ .clone(); + + let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = payload_envelope.into(); + let execution_payload = execution_payload_envelope.execution_payload; + + // Loop through all clients and broadcast the next new payload + let mut broadcast_results = Vec::new(); + let mut first_valid_seen = false; + + for (idx, client) in env.node_clients.iter().enumerate() { + let engine = client.engine.http_client(); + + // Broadcast the execution payload + let result = EngineApiClient::::new_payload_v3( + &engine, + execution_payload.clone(), + vec![], + parent_beacon_block_root, + ) + .await?; + + broadcast_results.push((idx, result.status.clone())); + debug!("Node {}: new_payload broadcast status: {:?}", idx, result.status); + + // Check if this node accepted the payload + if result.status == PayloadStatusEnum::Valid && !first_valid_seen { + first_valid_seen = true; + } else if let PayloadStatusEnum::Invalid { validation_error } = result.status { + debug!( + "Node {}: Invalid payload status returned from broadcast: {:?}", + idx, validation_error + ); + } + } + + // Update the executed payload state after broadcasting to all nodes + if first_valid_seen { + env.active_node_state_mut()?.latest_payload_executed = Some(next_new_payload); + } + + // Check if at least one node accepted the payload + let any_valid = + broadcast_results.iter().any(|(_, status)| *status == PayloadStatusEnum::Valid); + if !any_valid { + return Err(eyre::eyre!("Failed to successfully broadcast payload to any client")); + } + + debug!("Broadcast complete. Results: {:?}", broadcast_results); + + Ok(()) + }) + } +} + +/// Action that produces a sequence of blocks using the available clients +#[derive(Debug)] +pub struct ProduceBlocks { + /// Number of blocks to produce + pub num_blocks: u64, + /// Tracks engine type + _phantom: PhantomData, +} + +impl ProduceBlocks { + /// Create a new `ProduceBlocks` action + pub fn new(num_blocks: u64) -> Self { + Self { num_blocks, _phantom: Default::default() } + } +} + +impl Default for ProduceBlocks { + fn default() -> Self { + Self::new(0) + } +} + +impl Action for ProduceBlocks +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + for _ in 0..self.num_blocks { + // create a fresh sequence for each block to avoid state pollution + // Note: This produces blocks but does NOT make them canonical + // Use MakeCanonical action explicitly if canonicalization is needed + let mut sequence = Sequence::new(vec![ + Box::new(PickNextBlockProducer::default()), + Box::new(GeneratePayloadAttributes::default()), + Box::new(GenerateNextPayload::default()), + Box::new(BroadcastNextNewPayload::default()), + Box::new(UpdateBlockInfoToLatestPayload::default()), + ]); + sequence.execute(env).await?; + } + Ok(()) + }) + } +} + +/// Action to test forkchoice update to a tagged block with expected status +#[derive(Debug)] +pub struct TestFcuToTag { + /// Tag name of the target block + pub tag: String, + /// Expected payload status + pub expected_status: PayloadStatusEnum, +} + +impl TestFcuToTag { + /// Create a new `TestFcuToTag` action + pub fn new(tag: impl Into, expected_status: PayloadStatusEnum) -> Self { + Self { tag: tag.into(), expected_status } + } +} + +impl Action for TestFcuToTag +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, 
Result<()>> { + Box::pin(async move { + // get the target block from the registry + let (target_block, _node_idx) = env + .block_registry + .get(&self.tag) + .copied() + .ok_or_else(|| eyre::eyre!("Block tag '{}' not found in registry", self.tag))?; + + let engine_client = env.node_clients[0].engine.http_client(); + let fcu_state = ForkchoiceState { + head_block_hash: target_block.hash, + safe_block_hash: target_block.hash, + finalized_block_hash: target_block.hash, + }; + + let fcu_response = + EngineApiClient::::fork_choice_updated_v2(&engine_client, fcu_state, None) + .await?; + + // validate the response matches expected status + match (&fcu_response.payload_status.status, &self.expected_status) { + (PayloadStatusEnum::Valid, PayloadStatusEnum::Valid) => { + debug!("FCU to '{}' returned VALID as expected", self.tag); + } + (PayloadStatusEnum::Invalid { .. }, PayloadStatusEnum::Invalid { .. }) => { + debug!("FCU to '{}' returned INVALID as expected", self.tag); + } + (PayloadStatusEnum::Syncing, PayloadStatusEnum::Syncing) => { + debug!("FCU to '{}' returned SYNCING as expected", self.tag); + } + (PayloadStatusEnum::Accepted, PayloadStatusEnum::Accepted) => { + debug!("FCU to '{}' returned ACCEPTED as expected", self.tag); + } + (actual, expected) => { + return Err(eyre::eyre!( + "FCU to '{}': expected status {:?}, but got {:?}", + self.tag, + expected, + actual + )); + } + } + + Ok(()) + }) + } +} + +/// Action to expect a specific FCU status when targeting a tagged block +#[derive(Debug)] +pub struct ExpectFcuStatus { + /// Tag name of the target block + pub target_tag: String, + /// Expected payload status + pub expected_status: PayloadStatusEnum, +} + +impl ExpectFcuStatus { + /// Create a new `ExpectFcuStatus` action expecting VALID status + pub fn valid(target_tag: impl Into) -> Self { + Self { target_tag: target_tag.into(), expected_status: PayloadStatusEnum::Valid } + } + + /// Create a new `ExpectFcuStatus` action expecting INVALID status + pub fn invalid(target_tag: impl Into) -> Self { + Self { + target_tag: target_tag.into(), + expected_status: PayloadStatusEnum::Invalid { + validation_error: "corrupted block".to_string(), + }, + } + } + + /// Create a new `ExpectFcuStatus` action expecting SYNCING status + pub fn syncing(target_tag: impl Into) -> Self { + Self { target_tag: target_tag.into(), expected_status: PayloadStatusEnum::Syncing } + } + + /// Create a new `ExpectFcuStatus` action expecting ACCEPTED status + pub fn accepted(target_tag: impl Into) -> Self { + Self { target_tag: target_tag.into(), expected_status: PayloadStatusEnum::Accepted } + } +} + +impl Action for ExpectFcuStatus +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let mut test_fcu = TestFcuToTag::new(&self.target_tag, self.expected_status.clone()); + test_fcu.execute(env).await + }) + } +} + +/// Action to validate that a tagged block remains canonical by performing FCU to it +#[derive(Debug)] +pub struct ValidateCanonicalTag { + /// Tag name of the block to validate as canonical + pub tag: String, +} + +impl ValidateCanonicalTag { + /// Create a new `ValidateCanonicalTag` action + pub fn new(tag: impl Into) -> Self { + Self { tag: tag.into() } + } +} + +impl Action for ValidateCanonicalTag +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let mut expect_valid = ExpectFcuStatus::valid(&self.tag); + 
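            // canonical-tag validation is just a forkchoice update to the
            // tagged hash expecting VALID, so this delegates to ExpectFcuStatus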
expect_valid.execute(env).await?; + + debug!("Successfully validated that '{}' remains canonical", self.tag); + Ok(()) + }) + } +} + +/// Action that produces a sequence of blocks where some blocks are intentionally invalid +#[derive(Debug)] +pub struct ProduceInvalidBlocks { + /// Number of blocks to produce + pub num_blocks: u64, + /// Set of indices (0-based) where blocks should be made invalid + pub invalid_indices: HashSet, + /// Tracks engine type + _phantom: PhantomData, +} + +impl ProduceInvalidBlocks { + /// Create a new `ProduceInvalidBlocks` action + pub fn new(num_blocks: u64, invalid_indices: HashSet) -> Self { + Self { num_blocks, invalid_indices, _phantom: Default::default() } + } + + /// Create a new `ProduceInvalidBlocks` action with a single invalid block at the specified + /// index + pub fn with_invalid_at(num_blocks: u64, invalid_index: u64) -> Self { + let mut invalid_indices = HashSet::new(); + invalid_indices.insert(invalid_index); + Self::new(num_blocks, invalid_indices) + } +} + +impl Action for ProduceInvalidBlocks +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + for block_index in 0..self.num_blocks { + let is_invalid = self.invalid_indices.contains(&block_index); + + if is_invalid { + debug!("Producing invalid block at index {}", block_index); + + // produce a valid block first, then corrupt it + let mut sequence = Sequence::new(vec![ + Box::new(PickNextBlockProducer::default()), + Box::new(GeneratePayloadAttributes::default()), + Box::new(GenerateNextPayload::default()), + ]); + sequence.execute(env).await?; + + // get the latest payload and corrupt it + let latest_envelope = + env.active_node_state()?.latest_payload_envelope.as_ref().ok_or_else( + || eyre::eyre!("No payload envelope available to corrupt"), + )?; + + let envelope_v3: ExecutionPayloadEnvelopeV3 = latest_envelope.clone().into(); + let mut corrupted_payload = envelope_v3.execution_payload; + + // corrupt the state root to make the block invalid + corrupted_payload.payload_inner.payload_inner.state_root = B256::random(); + + debug!( + "Corrupted state root for block {} to: {}", + block_index, corrupted_payload.payload_inner.payload_inner.state_root + ); + + // send the corrupted payload via newPayload + let engine_client = env.node_clients[0].engine.http_client(); + // for simplicity, we'll use empty versioned hashes for invalid block testing + let versioned_hashes = Vec::new(); + // use a random parent beacon block root since this is for invalid block testing + let parent_beacon_block_root = B256::random(); + + let new_payload_response = EngineApiClient::::new_payload_v3( + &engine_client, + corrupted_payload.clone(), + versioned_hashes, + parent_beacon_block_root, + ) + .await?; + + // expect the payload to be rejected as invalid + match new_payload_response.status { + PayloadStatusEnum::Invalid { validation_error } => { + debug!( + "Block {} correctly rejected as invalid: {:?}", + block_index, validation_error + ); + } + other_status => { + return Err(eyre::eyre!( + "Expected block {} to be rejected as INVALID, but got: {:?}", + block_index, + other_status + )); + } + } + + // update block info with the corrupted block (for potential future reference) + env.set_current_block_info(BlockInfo { + hash: corrupted_payload.payload_inner.payload_inner.block_hash, + number: 
corrupted_payload.payload_inner.payload_inner.block_number, + timestamp: corrupted_payload.timestamp(), + })?; + } else { + debug!("Producing valid block at index {}", block_index); + + // produce a valid block normally + let mut sequence = Sequence::new(vec![ + Box::new(PickNextBlockProducer::default()), + Box::new(GeneratePayloadAttributes::default()), + Box::new(GenerateNextPayload::default()), + Box::new(BroadcastNextNewPayload::default()), + Box::new(UpdateBlockInfoToLatestPayload::default()), + ]); + sequence.execute(env).await?; + } + } + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/actions/reorg.rs b/crates/e2e-test-utils/src/testsuite/actions/reorg.rs new file mode 100644 index 00000000000..337734c3282 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/reorg.rs @@ -0,0 +1,123 @@ +//! Reorg actions for the e2e testing framework. + +use crate::testsuite::{ + actions::{produce_blocks::BroadcastLatestForkchoice, Action, Sequence}, + BlockInfo, Environment, +}; +use alloy_primitives::B256; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::{EngineTypes, PayloadTypes}; +use std::marker::PhantomData; +use tracing::debug; + +/// Target for reorg operation +#[derive(Debug, Clone)] +pub enum ReorgTarget { + /// Direct block hash + Hash(B256), + /// Tagged block reference + Tag(String), +} + +/// Action that performs a reorg by setting a new head block as canonical +#[derive(Debug)] +pub struct ReorgTo { + /// Target for the reorg operation + pub target: ReorgTarget, + /// Tracks engine type + _phantom: PhantomData, +} + +impl ReorgTo { + /// Create a new `ReorgTo` action with a direct block hash + pub const fn new(target_hash: B256) -> Self { + Self { target: ReorgTarget::Hash(target_hash), _phantom: PhantomData } + } + + /// Create a new `ReorgTo` action with a tagged block reference + pub fn new_from_tag(tag: impl Into) -> Self { + Self { target: ReorgTarget::Tag(tag.into()), _phantom: PhantomData } + } +} + +impl Action for ReorgTo +where + Engine: EngineTypes + PayloadTypes, + Engine::PayloadAttributes: From + Clone, + Engine::ExecutionPayloadEnvelopeV3: + Into, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // resolve the target block info from either direct hash or tag + let target_block_info = match &self.target { + ReorgTarget::Hash(_hash) => { + return Err(eyre::eyre!( + "Direct hash reorgs are not supported. 
Use CaptureBlock to tag the target block first, then use ReorgTo::new_from_tag()" + )); + } + ReorgTarget::Tag(tag) => { + let (block_info, _node_idx) = + env.block_registry.get(tag).copied().ok_or_else(|| { + eyre::eyre!("Block tag '{}' not found in registry", tag) + })?; + block_info + } + }; + + let mut sequence = Sequence::new(vec![ + Box::new(SetReorgTarget::new(target_block_info)), + Box::new(BroadcastLatestForkchoice::default()), + ]); + + sequence.execute(env).await + }) + } +} + +/// Sub-action to set the reorg target block in the environment +#[derive(Debug)] +pub struct SetReorgTarget { + /// Complete block info for the reorg target + pub target_block_info: BlockInfo, +} + +impl SetReorgTarget { + /// Create a new `SetReorgTarget` action + pub const fn new(target_block_info: BlockInfo) -> Self { + Self { target_block_info } + } +} + +impl Action for SetReorgTarget +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let block_info = self.target_block_info; + + debug!( + "Setting reorg target to block {} (hash: {})", + block_info.number, block_info.hash + ); + + // update active node state to point to the target block + let active_node_state = env.active_node_state_mut()?; + active_node_state.current_block_info = Some(block_info); + active_node_state.latest_header_time = block_info.timestamp; + + // update fork choice state to make the target block canonical + active_node_state.latest_fork_choice_state = ForkchoiceState { + head_block_hash: block_info.hash, + safe_block_hash: block_info.hash, + finalized_block_hash: block_info.hash, + }; + + debug!("Set reorg target to block {}", block_info.hash); + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/examples.rs b/crates/e2e-test-utils/src/testsuite/examples.rs index 0d9343482dd..fc7afd04359 100644 --- a/crates/e2e-test-utils/src/testsuite/examples.rs +++ b/crates/e2e-test-utils/src/testsuite/examples.rs @@ -1,7 +1,10 @@ //! Example tests using the test suite framework. 
use crate::testsuite::{ - actions::{AssertMineBlock, ProduceBlocks}, + actions::{ + AssertMineBlock, CaptureBlock, CaptureBlockOnNode, CompareNodeChainTips, CreateFork, + MakeCanonical, ProduceBlocks, ReorgTo, SelectActiveNode, + }, setup::{NetworkSetup, Setup}, TestBuilder, }; @@ -9,6 +12,7 @@ use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::PayloadAttributes; use eyre::Result; use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; use std::sync::Arc; @@ -63,8 +67,145 @@ async fn test_testsuite_produce_blocks() -> Result<()> { )) .with_network(NetworkSetup::single_node()); - let test = - TestBuilder::new().with_setup(setup).with_action(ProduceBlocks::::new(0)); + let test = TestBuilder::new() + .with_setup(setup) + .with_action(ProduceBlocks::::new(5)) + .with_action(MakeCanonical::new()); + + test.run::().await?; + + Ok(()) +} + +#[tokio::test] +async fn test_testsuite_create_fork() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + )) + .with_network(NetworkSetup::single_node()); + + let test = TestBuilder::new() + .with_setup(setup) + .with_action(ProduceBlocks::::new(2)) + .with_action(MakeCanonical::new()) + .with_action(CreateFork::::new(1, 3)); + + test.run::().await?; + + Ok(()) +} + +#[tokio::test] +async fn test_testsuite_reorg_with_tagging() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + )) + .with_network(NetworkSetup::single_node()); + + let test = TestBuilder::new() + .with_setup(setup) + .with_action(ProduceBlocks::::new(3)) // produce blocks 1, 2, 3 + .with_action(MakeCanonical::new()) // make main chain tip canonical + .with_action(CreateFork::::new(1, 2)) // fork from block 1, produce blocks 2', 3' + .with_action(CaptureBlock::new("fork_tip")) // tag fork tip + .with_action(ReorgTo::::new_from_tag("fork_tip")); // reorg to fork tip + + test.run::().await?; + + Ok(()) +} + +#[tokio::test] +async fn test_testsuite_deep_reorg() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + )) + .with_network(NetworkSetup::single_node()) + .with_tree_config(TreeConfig::default().with_state_root_fallback(true)); + + let test = TestBuilder::new() + .with_setup(setup) + // receive newPayload and forkchoiceUpdated with block height 1 + .with_action(ProduceBlocks::::new(1)) + .with_action(MakeCanonical::new()) + .with_action(CaptureBlock::new("block1")) + // receive forkchoiceUpdated with block hash A as head (block A at height 2) + .with_action(CreateFork::::new(1, 1)) + .with_action(CaptureBlock::new("blockA_height2")) + .with_action(MakeCanonical::new()) + // receive newPayload with block hash B and height 2 + .with_action(ReorgTo::::new_from_tag("block1")) + .with_action(CreateFork::::new(1, 1)) + .with_action(CaptureBlock::new("blockB_height2")) + // receive forkchoiceUpdated with block hash B as 
head + .with_action(ReorgTo::::new_from_tag("blockB_height2")); + + test.run::().await?; + + Ok(()) +} + +/// Multi-node test demonstrating block creation and coordination across multiple nodes. +/// +/// This test demonstrates the working multi-node framework: +/// - Multiple nodes start from the same genesis +/// - Nodes can be selected for specific operations +/// - Block production can happen on different nodes +/// - Chain tips can be compared between nodes +/// - Node-specific state is properly tracked +#[tokio::test] +async fn test_testsuite_multinode_block_production() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + )) + .with_network(NetworkSetup::multi_node(2)) // Create 2 nodes + .with_tree_config(TreeConfig::default().with_state_root_fallback(true)); + + let test = TestBuilder::new() + .with_setup(setup) + // both nodes start from genesis + .with_action(CaptureBlock::new("genesis")) + .with_action(CompareNodeChainTips::expect_same(0, 1)) + // build main chain (blocks 1-3) + .with_action(SelectActiveNode::new(0)) + .with_action(ProduceBlocks::::new(3)) + .with_action(MakeCanonical::new()) + .with_action(CaptureBlockOnNode::new("node0_tip", 0)) + .with_action(CompareNodeChainTips::expect_same(0, 1)) + // node 0 already has the state and can continue producing blocks + .with_action(ProduceBlocks::::new(2)) + .with_action(MakeCanonical::new()) + .with_action(CaptureBlockOnNode::new("node0_tip_2", 0)) + // verify both nodes remain in sync + .with_action(CompareNodeChainTips::expect_same(0, 1)); test.run::().await?; diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index f147bc5c572..811d76a68db 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -9,7 +9,7 @@ use alloy_primitives::B256; use eyre::Result; use jsonrpsee::http_client::HttpClient; use reth_engine_local::LocalPayloadAttributesBuilder; -use reth_node_api::{NodeTypes, PayloadTypes}; +use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes}; use reth_payload_builder::PayloadId; use std::{collections::HashMap, marker::PhantomData}; pub mod actions; @@ -22,7 +22,7 @@ use reth_rpc_builder::auth::AuthServerHandle; mod examples; /// Client handles for both regular RPC and Engine API endpoints -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct NodeClient { /// Regular JSON-RPC client pub rpc: HttpClient, @@ -37,81 +37,222 @@ impl NodeClient { } } -/// Represents the latest block information. -#[derive(Debug, Clone)] -pub struct LatestBlockInfo { - /// Hash of the latest block +/// Represents complete block information. 
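// (deliberately `Copy`: three fixed-size fields, so the block registry and
//  per-node states can hand out values without cloning)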
+#[derive(Debug, Clone, Copy)] +pub struct BlockInfo { + /// Hash of the block pub hash: B256, - /// Number of the latest block + /// Number of the block pub number: u64, + /// Timestamp of the block + pub timestamp: u64, +} + +/// Per-node state tracking for multi-node environments +#[derive(Clone)] +pub struct NodeState +where + I: EngineTypes, +{ + /// Current block information for this node + pub current_block_info: Option, + /// Stores payload attributes indexed by block number for this node + pub payload_attributes: HashMap, + /// Tracks the latest block header timestamp for this node + pub latest_header_time: u64, + /// Stores payload IDs returned by this node, indexed by block number + pub payload_id_history: HashMap, + /// Stores the next expected payload ID for this node + pub next_payload_id: Option, + /// Stores the latest fork choice state for this node + pub latest_fork_choice_state: ForkchoiceState, + /// Stores the most recent built execution payload for this node + pub latest_payload_built: Option, + /// Stores the most recent executed payload for this node + pub latest_payload_executed: Option, + /// Stores the most recent built execution payload envelope for this node + pub latest_payload_envelope: Option, + /// Fork base block number for validation (if this node is currently on a fork) + pub current_fork_base: Option, } + +impl Default for NodeState +where + I: EngineTypes, +{ + fn default() -> Self { + Self { + current_block_info: None, + payload_attributes: HashMap::new(), + latest_header_time: 0, + payload_id_history: HashMap::new(), + next_payload_id: None, + latest_fork_choice_state: ForkchoiceState::default(), + latest_payload_built: None, + latest_payload_executed: None, + latest_payload_envelope: None, + current_fork_base: None, + } + } +} + +impl std::fmt::Debug for NodeState +where + I: EngineTypes, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeState") + .field("current_block_info", &self.current_block_info) + .field("payload_attributes", &self.payload_attributes) + .field("latest_header_time", &self.latest_header_time) + .field("payload_id_history", &self.payload_id_history) + .field("next_payload_id", &self.next_payload_id) + .field("latest_fork_choice_state", &self.latest_fork_choice_state) + .field("latest_payload_built", &self.latest_payload_built) + .field("latest_payload_executed", &self.latest_payload_executed) + .field("latest_payload_envelope", &"") + .field("current_fork_base", &self.current_fork_base) + .finish() + } +} + /// Represents a test environment. #[derive(Debug)] -pub struct Environment { +pub struct Environment +where + I: EngineTypes, +{ /// Combined clients with both RPC and Engine API endpoints pub node_clients: Vec, + /// Per-node state tracking + pub node_states: Vec>, /// Tracks instance generic. 
_phantom: PhantomData, - /// Latest block information - pub latest_block_info: Option, /// Last producer index pub last_producer_idx: Option, - /// Stores payload attributes indexed by block number - pub payload_attributes: HashMap, - /// Tracks the latest block header timestamp - pub latest_header_time: u64, /// Defines the increment for block timestamps (default: 2 seconds) pub block_timestamp_increment: u64, - /// Stores payload IDs returned by block producers, indexed by block number - pub payload_id_history: HashMap, - /// Stores the next expected payload ID - pub next_payload_id: Option, - /// Stores the latest fork choice state - pub latest_fork_choice_state: ForkchoiceState, - /// Stores the most recent built execution payload - pub latest_payload_built: Option, - /// Stores the most recent executed payload - pub latest_payload_executed: Option, /// Number of slots until a block is considered safe pub slots_to_safe: u64, /// Number of slots until a block is considered finalized pub slots_to_finalized: u64, + /// Registry for tagged blocks, mapping tag names to block info and node index + pub block_registry: HashMap, + /// Currently active node index for backward compatibility with single-node actions + pub active_node_idx: usize, } -impl Default for Environment { +impl Default for Environment +where + I: EngineTypes, +{ fn default() -> Self { Self { node_clients: vec![], + node_states: vec![], _phantom: Default::default(), - latest_block_info: None, last_producer_idx: None, - payload_attributes: Default::default(), - latest_header_time: 0, block_timestamp_increment: 2, - payload_id_history: HashMap::new(), - next_payload_id: None, - latest_fork_choice_state: ForkchoiceState::default(), - latest_payload_built: None, - latest_payload_executed: None, slots_to_safe: 0, slots_to_finalized: 0, + block_registry: HashMap::new(), + active_node_idx: 0, + } + } +} + +impl Environment +where + I: EngineTypes, +{ + /// Get the number of nodes in the environment + pub fn node_count(&self) -> usize { + self.node_clients.len() + } + + /// Get mutable reference to a specific node's state + pub fn node_state_mut(&mut self, node_idx: usize) -> Result<&mut NodeState, eyre::Error> { + let node_count = self.node_count(); + self.node_states.get_mut(node_idx).ok_or_else(|| { + eyre::eyre!("Node index {} out of bounds (have {} nodes)", node_idx, node_count) + }) + } + + /// Get immutable reference to a specific node's state + pub fn node_state(&self, node_idx: usize) -> Result<&NodeState, eyre::Error> { + self.node_states.get(node_idx).ok_or_else(|| { + eyre::eyre!("Node index {} out of bounds (have {} nodes)", node_idx, self.node_count()) + }) + } + + /// Get the currently active node's state + pub fn active_node_state(&self) -> Result<&NodeState, eyre::Error> { + self.node_state(self.active_node_idx) + } + + /// Get mutable reference to the currently active node's state + pub fn active_node_state_mut(&mut self) -> Result<&mut NodeState, eyre::Error> { + let idx = self.active_node_idx; + self.node_state_mut(idx) + } + + /// Set the active node index + pub fn set_active_node(&mut self, node_idx: usize) -> Result<(), eyre::Error> { + if node_idx >= self.node_count() { + return Err(eyre::eyre!( + "Node index {} out of bounds (have {} nodes)", + node_idx, + self.node_count() + )); } + self.active_node_idx = node_idx; + Ok(()) + } + + /// Initialize node states when nodes are created + pub fn initialize_node_states(&mut self, node_count: usize) { + self.node_states = (0..node_count).map(|_| 
+
+impl<I> Environment<I>
+where
+    I: EngineTypes,
+{
+    /// Get the number of nodes in the environment
+    pub fn node_count(&self) -> usize {
+        self.node_clients.len()
+    }
+
+    /// Get mutable reference to a specific node's state
+    pub fn node_state_mut(&mut self, node_idx: usize) -> Result<&mut NodeState<I>, eyre::Error> {
+        let node_count = self.node_count();
+        self.node_states.get_mut(node_idx).ok_or_else(|| {
+            eyre::eyre!("Node index {} out of bounds (have {} nodes)", node_idx, node_count)
+        })
+    }
+
+    /// Get immutable reference to a specific node's state
+    pub fn node_state(&self, node_idx: usize) -> Result<&NodeState<I>, eyre::Error> {
+        self.node_states.get(node_idx).ok_or_else(|| {
+            eyre::eyre!("Node index {} out of bounds (have {} nodes)", node_idx, self.node_count())
+        })
+    }
+
+    /// Get the currently active node's state
+    pub fn active_node_state(&self) -> Result<&NodeState<I>, eyre::Error> {
+        self.node_state(self.active_node_idx)
+    }
+
+    /// Get mutable reference to the currently active node's state
+    pub fn active_node_state_mut(&mut self) -> Result<&mut NodeState<I>, eyre::Error> {
+        let idx = self.active_node_idx;
+        self.node_state_mut(idx)
+    }
+
+    /// Set the active node index
+    pub fn set_active_node(&mut self, node_idx: usize) -> Result<(), eyre::Error> {
+        if node_idx >= self.node_count() {
+            return Err(eyre::eyre!(
+                "Node index {} out of bounds (have {} nodes)",
+                node_idx,
+                self.node_count()
+            ));
+        }
+        self.active_node_idx = node_idx;
+        Ok(())
+    }
+
+    /// Initialize node states when nodes are created
+    pub fn initialize_node_states(&mut self, node_count: usize) {
+        self.node_states = (0..node_count).map(|_| NodeState::default()).collect();
+    }
+
+    /// Get current block info from active node
+    pub fn current_block_info(&self) -> Option<BlockInfo> {
+        self.active_node_state().ok()?.current_block_info
+    }
+
+    /// Set current block info on active node
+    pub fn set_current_block_info(&mut self, block_info: BlockInfo) -> Result<(), eyre::Error> {
+        self.active_node_state_mut()?.current_block_info = Some(block_info);
+        Ok(())
+    }
 }
 
 /// Builder for creating test scenarios
 #[expect(missing_debug_implementations)]
-#[derive(Default)]
-pub struct TestBuilder {
+pub struct TestBuilder<I>
+where
+    I: EngineTypes,
+{
     setup: Option<Setup<I>>,
     actions: Vec<Box<dyn Action<I>>>,
     env: Environment<I>,
 }
 
-impl TestBuilder {
+impl<I> Default for TestBuilder<I>
+where
+    I: EngineTypes,
+{
+    fn default() -> Self {
+        Self { setup: None, actions: Vec::new(), env: Default::default() }
+    }
+}
+
+impl<I> TestBuilder<I>
+where
+    I: EngineTypes + 'static,
+{
     /// Create a new test builder
     pub fn new() -> Self {
-        Self { setup: None, actions: Vec::new(), env: Default::default() }
+        Self::default()
     }
 
     /// Set the test setup
diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs
index d81541156d5..0894c208203 100644
--- a/crates/e2e-test-utils/src/testsuite/setup.rs
+++ b/crates/e2e-test-utils/src/testsuite/setup.rs
@@ -6,13 +6,13 @@ use crate::{
 };
 use alloy_eips::BlockNumberOrTag;
 use alloy_primitives::B256;
-use alloy_rpc_types_engine::PayloadAttributes;
+use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes};
 use alloy_rpc_types_eth::{Block as RpcBlock, Header, Receipt, Transaction};
 use eyre::{eyre, Result};
 use reth_chainspec::ChainSpec;
 use reth_engine_local::LocalPayloadAttributesBuilder;
 use reth_ethereum_primitives::Block;
-use reth_node_api::{NodeTypes, PayloadTypes};
+use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes, TreeConfig};
 use reth_node_core::primitives::RecoveredBlock;
 use reth_payload_builder::EthPayloadBuilderAttributes;
 use reth_rpc_api::clients::EthApiClient;
@@ -37,6 +37,8 @@ pub struct Setup<I> {
     pub state: Option,
     /// Network configuration
     pub network: NetworkSetup,
+    /// Engine tree configuration
+    pub tree_config: TreeConfig,
     /// Shutdown channel to stop nodes when setup is dropped
     shutdown_tx: Option<Sender<()>>,
     /// Is this setup in dev mode
     pub is_dev: bool,
@@ -53,6 +55,7 @@ impl<I> Default for Setup<I> {
             blocks: Vec::new(),
             state: None,
             network: NetworkSetup::default(),
+            tree_config: TreeConfig::default(),
             shutdown_tx: None,
             is_dev: true,
             _phantom: Default::default(),
@@ -69,7 +72,10 @@
     }
 }
 
-impl<I> Setup<I> {
+impl<I> Setup<I>
+where
+    I: EngineTypes,
+{
     /// Create a new setup with default values
     pub fn new() -> Self {
         Self::default()
@@ -117,6 +123,12 @@ impl<I> Setup<I> {
         self
     }
 
+    /// Set the engine tree configuration
+    pub const fn with_tree_config(mut self, tree_config: TreeConfig) -> Self {
+        self.tree_config = tree_config;
+        self
+    }
+
     /// Apply the setup to the environment
     pub async fn apply(&mut self, env: &mut Environment<I>) -> Result<()>
     where
@@ -153,6 +165,7 @@ impl<I> Setup<I> {
             node_count,
             Arc::<N::ChainSpec>::new((*chain_spec).clone().into()),
             is_dev,
+            self.tree_config.clone(),
             attributes_generator,
         )
         .await;
@@ -225,6 +238,49 @@ impl<I> Setup<I> {
 
         env.node_clients = node_clients;
 
+        // Initialize per-node states for all nodes
+        env.initialize_node_states(node_count);
+
+        // Initialize each node's state with genesis block information
+        let genesis_block_info = {
+            let first_client = &env.node_clients[0];
+            let genesis_block =
+                EthApiClient::<Transaction, RpcBlock, Receipt, Header>::block_by_number(
+                    &first_client.rpc,
+                    BlockNumberOrTag::Number(0),
+                    false,
+                )
.await? + .ok_or_else(|| eyre!("Genesis block not found"))?; + + crate::testsuite::BlockInfo { + hash: genesis_block.header.hash, + number: genesis_block.header.number, + timestamp: genesis_block.header.timestamp, + } + }; + + // Initialize all node states with the same genesis block + for (node_idx, node_state) in env.node_states.iter_mut().enumerate() { + node_state.current_block_info = Some(genesis_block_info); + node_state.latest_header_time = genesis_block_info.timestamp; + node_state.latest_fork_choice_state = ForkchoiceState { + head_block_hash: genesis_block_info.hash, + safe_block_hash: genesis_block_info.hash, + finalized_block_hash: genesis_block_info.hash, + }; + + debug!( + "Node {} initialized with genesis block {} (hash: {})", + node_idx, genesis_block_info.number, genesis_block_info.hash + ); + } + + debug!( + "Environment initialized with {} nodes, all starting from genesis block {} (hash: {})", + node_count, genesis_block_info.number, genesis_block_info.hash + ); + // TODO: For each block in self.blocks, replay it on the node Ok(()) diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 3709348816b..2de5ec3c882 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -11,19 +11,12 @@ exclude.workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-consensus.workspace = true reth-engine-primitives.workspace = true -reth-engine-service.workspace = true -reth-engine-tree.workspace = true -reth-node-types.workspace = true -reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-provider.workspace = true -reth-prune.workspace = true reth-transaction-pool.workspace = true -reth-stages-api.workspace = true # alloy alloy-consensus.workspace = true @@ -53,10 +46,8 @@ op = [ "dep:op-alloy-rpc-types-engine", "dep:reth-optimism-chainspec", "reth-payload-primitives/op", - "reth-evm/op", ] scroll-alloy-traits = [ "dep:scroll-alloy-rpc-types-engine", - "reth-evm/scroll-alloy-traits", "reth-payload-primitives/scroll-alloy-traits", ] diff --git a/crates/engine/local/src/lib.rs b/crates/engine/local/src/lib.rs index 26c84d50c85..072b42a030e 100644 --- a/crates/engine/local/src/lib.rs +++ b/crates/engine/local/src/lib.rs @@ -10,8 +10,6 @@ pub mod miner; pub mod payload; -pub mod service; -pub use miner::MiningMode; +pub use miner::{LocalMiner, MiningMode}; pub use payload::LocalPayloadAttributesBuilder; -pub use service::LocalEngineService; diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 7b907d29492..a3318f1f5c2 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -5,7 +5,7 @@ use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::ForkchoiceState; use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; -use reth_engine_primitives::BeaconEngineMessage; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, @@ -18,10 +18,7 @@ use std::{ task::{Context, Poll}, time::{Duration, UNIX_EPOCH}, }; -use tokio::{ - sync::{mpsc::UnboundedSender, oneshot}, - time::Interval, -}; +use tokio::time::Interval; use tokio_stream::wrappers::ReceiverStream; use tracing::error; @@ -78,7 +75,7 @@ pub struct LocalMiner { /// The payload attribute builder for the engine 
payload_attributes_builder: B, /// Sender for events to engine. - to_engine: UnboundedSender>, + to_engine: BeaconConsensusEngineHandle, /// The mining mode for the engine mode: MiningMode, /// The payload builder for the engine @@ -95,31 +92,28 @@ where B: PayloadAttributesBuilder<::PayloadAttributes>, { /// Spawns a new [`LocalMiner`] with the given parameters. - pub fn spawn_new( + pub fn new( provider: impl BlockReader, payload_attributes_builder: B, - to_engine: UnboundedSender>, + to_engine: BeaconConsensusEngineHandle, mode: MiningMode, payload_builder: PayloadBuilderHandle, - ) { + ) -> Self { let latest_header = provider.sealed_header(provider.best_block_number().unwrap()).unwrap().unwrap(); - let miner = Self { + Self { payload_attributes_builder, to_engine, mode, payload_builder, last_timestamp: latest_header.timestamp(), last_block_hashes: vec![latest_header.hash()], - }; - - // Spawn the miner - tokio::spawn(miner.run()); + } } /// Runs the [`LocalMiner`] in a loop, polling the miner and building payloads. - async fn run(mut self) { + pub async fn run(mut self) { let mut fcu_interval = tokio::time::interval(Duration::from_secs(1)); loop { tokio::select! { @@ -156,16 +150,12 @@ where /// Sends a FCU to the engine. async fn update_forkchoice_state(&self) -> eyre::Result<()> { - let (tx, rx) = oneshot::channel(); - self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state: self.forkchoice_state(), - payload_attrs: None, - tx, - version: EngineApiMessageVersion::default(), - })?; - - let res = rx.await??; - if !res.forkchoice_status().is_valid() { + let res = self + .to_engine + .fork_choice_updated(self.forkchoice_state(), None, EngineApiMessageVersion::default()) + .await?; + + if !res.is_valid() { eyre::bail!("Invalid fork choice update") } @@ -183,16 +173,16 @@ where .as_secs(), ); - let (tx, rx) = oneshot::channel(); - self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state: self.forkchoice_state(), - payload_attrs: Some(self.payload_attributes_builder.build(timestamp)), - tx, - version: EngineApiMessageVersion::default(), - })?; + let res = self + .to_engine + .fork_choice_updated( + self.forkchoice_state(), + Some(self.payload_attributes_builder.build(timestamp)), + EngineApiMessageVersion::default(), + ) + .await?; - let res = rx.await??.await?; - if !res.payload_status.is_valid() { + if !res.is_valid() { eyre::bail!("Invalid payload status") } @@ -206,11 +196,8 @@ where let block = payload.block(); - let (tx, rx) = oneshot::channel(); let payload = T::block_to_payload(payload.block().clone()); - self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx })?; - - let res = rx.await??; + let res = self.to_engine.new_payload(payload).await?; if !res.is_valid() { eyre::bail!("Invalid payload") diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index af8d68bb7ce..7d126f9406d 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -1,5 +1,5 @@ //! The implementation of the [`PayloadAttributesBuilder`] for the -//! [`LocalEngineService`](super::service::LocalEngineService). +//! [`LocalMiner`](super::LocalMiner). use alloy_primitives::{Address, B256}; use reth_chainspec::EthereumHardforks; diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs deleted file mode 100644 index 90fe47f94af..00000000000 --- a/crates/engine/local/src/service.rs +++ /dev/null @@ -1,163 +0,0 @@ -//! Provides a local dev service engine that can be used to run a dev chain. 
-//! -//! [`LocalEngineService`] polls the payload builder based on a mining mode -//! which can be set to `Instant` or `Interval`. The `Instant` mode will -//! constantly poll the payload builder and initiate block building -//! with a single transaction. The `Interval` mode will initiate block -//! building at a fixed interval. - -use core::fmt; -use std::{ - fmt::{Debug, Formatter}, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use crate::miner::{LocalMiner, MiningMode}; -use futures_util::{Stream, StreamExt}; -use reth_chainspec::EthChainSpec; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator}; -use reth_engine_service::service::EngineMessageStream; -use reth_engine_tree::{ - chain::{ChainEvent, HandlerEvent}, - engine::{ - EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine, - RequestHandlerEvent, - }, - persistence::PersistenceHandle, - tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, -}; -use reth_evm::ConfigureEvm; -use reth_node_types::BlockTy; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, - ChainSpecProvider, ProviderFactory, -}; -use reth_prune::PrunerWithFactory; -use reth_stages_api::MetricEventsSender; -use tokio::sync::mpsc::UnboundedSender; -use tracing::error; - -/// Provides a local dev service engine that can be used to drive the -/// chain forward. -/// -/// This service both produces and consumes [`BeaconEngineMessage`]s. This is done to allow -/// modifications of the stream -pub struct LocalEngineService -where - N: ProviderNodeTypes, -{ - /// Processes requests. - /// - /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler, N::Primitives>, - /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. - incoming_requests: EngineMessageStream, -} - -impl LocalEngineService -where - N: ProviderNodeTypes, -{ - /// Constructor for [`LocalEngineService`]. 
- #[expect(clippy::too_many_arguments)] - pub fn new( - consensus: Arc>, - provider: ProviderFactory, - blockchain_db: BlockchainProvider, - pruner: PrunerWithFactory>, - payload_builder: PayloadBuilderHandle, - payload_validator: V, - tree_config: TreeConfig, - invalid_block_hook: Box>, - sync_metrics_tx: MetricEventsSender, - to_engine: UnboundedSender>, - from_engine: EngineMessageStream, - mode: MiningMode, - payload_attributes_builder: B, - evm_config: C, - ) -> Self - where - B: PayloadAttributesBuilder<::PayloadAttributes>, - V: EngineValidator>, - C: ConfigureEvm + 'static, - { - let chain_spec = provider.chain_spec(); - let engine_kind = - if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; - - let persistence_handle = - PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx); - let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - - let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( - blockchain_db.clone(), - consensus, - payload_validator, - persistence_handle, - payload_builder.clone(), - canonical_in_memory_state, - tree_config, - invalid_block_hook, - engine_kind, - evm_config, - ); - - let handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); - - LocalMiner::spawn_new( - blockchain_db, - payload_attributes_builder, - to_engine, - mode, - payload_builder, - ); - - Self { handler, incoming_requests: from_engine } - } -} - -impl Stream for LocalEngineService -where - N: ProviderNodeTypes, -{ - type Item = ChainEvent>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - if let Poll::Ready(ev) = this.handler.poll(cx) { - return match ev { - RequestHandlerEvent::HandlerEvent(ev) => match ev { - HandlerEvent::BackfillAction(_) => { - error!(target: "engine::local", "received backfill request in local engine"); - Poll::Ready(Some(ChainEvent::FatalError)) - } - HandlerEvent::Event(ev) => Poll::Ready(Some(ChainEvent::Handler(ev))), - HandlerEvent::FatalError => Poll::Ready(Some(ChainEvent::FatalError)), - }, - RequestHandlerEvent::Download(_) => { - error!(target: "engine::local", "received download request in local engine"); - Poll::Ready(Some(ChainEvent::FatalError)) - } - } - } - - // forward incoming requests to the handler - while let Poll::Ready(Some(req)) = this.incoming_requests.poll_next_unpin(cx) { - this.handler.on_event(FromEngine::Request(req.into())); - } - - Poll::Pending - } -} - -impl Debug for LocalEngineService { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("LocalEngineService").finish_non_exhaustive() - } -} diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 43c0cf69dcd..3e1f7893093 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -26,6 +26,7 @@ reth-trie-common.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -46,6 +47,7 @@ std = [ "alloy-primitives/std", "alloy-consensus/std", "alloy-rpc-types-engine/std", + "alloy-eips/std", "futures/std", "serde/std", "thiserror/std", diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 1f5736c9aeb..639b227679d 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -37,7 +37,7 @@ pub fn has_enough_parallelism() -> 
bool { } /// The configuration of the engine tree. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct TreeConfig { /// Maximum number of blocks to be kept only in memory without triggering /// persistence. @@ -75,8 +75,24 @@ pub struct TreeConfig { max_proof_task_concurrency: u64, /// Number of reserved CPU cores for non-reth processes reserved_cpu_cores: usize, - /// Whether to enable the precompile cache - precompile_cache_enabled: bool, + /// Whether to disable the precompile cache + precompile_cache_disabled: bool, + /// Whether to use state root fallback for testing + state_root_fallback: bool, + /// Whether to always process payload attributes and begin a payload build process + /// even if `forkchoiceState.headBlockHash` is already the canonical head or an ancestor. + /// + /// The Engine API specification generally states that client software "MUST NOT begin a + /// payload build process if `forkchoiceState.headBlockHash` references a `VALID` + /// ancestor of the head of canonical chain". + /// See: (Rule 2) + /// + /// This flag allows overriding that behavior. + /// This is useful for specific chain configurations (e.g., OP Stack where proposers + /// can reorg their own chain), various custom chains, or for development/testing purposes + /// where immediate payload regeneration is desired despite the head not changing or moving to + /// an ancestor. + always_process_payload_attributes_on_canonical_head: bool, } impl Default for TreeConfig { @@ -95,7 +111,9 @@ impl Default for TreeConfig { has_enough_parallelism: has_enough_parallelism(), max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, - precompile_cache_enabled: false, + precompile_cache_disabled: false, + state_root_fallback: false, + always_process_payload_attributes_on_canonical_head: false, } } } @@ -117,7 +135,9 @@ impl TreeConfig { has_enough_parallelism: bool, max_proof_task_concurrency: u64, reserved_cpu_cores: usize, - precompile_cache_enabled: bool, + precompile_cache_disabled: bool, + state_root_fallback: bool, + always_process_payload_attributes_on_canonical_head: bool, ) -> Self { Self { persistence_threshold, @@ -133,7 +153,9 @@ impl TreeConfig { has_enough_parallelism, max_proof_task_concurrency, reserved_cpu_cores, - precompile_cache_enabled, + precompile_cache_disabled, + state_root_fallback, + always_process_payload_attributes_on_canonical_head, } } @@ -199,9 +221,30 @@ impl TreeConfig { self.cross_block_cache_size } - /// Returns whether precompile cache is enabled. - pub const fn precompile_cache_enabled(&self) -> bool { - self.precompile_cache_enabled + /// Returns whether precompile cache is disabled. + pub const fn precompile_cache_disabled(&self) -> bool { + self.precompile_cache_disabled + } + + /// Returns whether to use state root fallback. + pub const fn state_root_fallback(&self) -> bool { + self.state_root_fallback + } + + /// Sets whether to always process payload attributes when the FCU head is already canonical. + pub const fn with_always_process_payload_attributes_on_canonical_head( + mut self, + always_process_payload_attributes_on_canonical_head: bool, + ) -> Self { + self.always_process_payload_attributes_on_canonical_head = + always_process_payload_attributes_on_canonical_head; + self + } + + /// Returns true if payload attributes should always be processed even when the FCU head is + /// canonical. 
+ pub const fn always_process_payload_attributes_on_canonical_head(&self) -> bool { + self.always_process_payload_attributes_on_canonical_head } /// Setter for persistence threshold. @@ -301,9 +344,15 @@ impl TreeConfig { self } - /// Setter for whether to use the precompile cache. - pub const fn with_precompile_cache_enabled(mut self, precompile_cache_enabled: bool) -> Self { - self.precompile_cache_enabled = precompile_cache_enabled; + /// Setter for whether to disable the precompile cache. + pub const fn without_precompile_cache(mut self, precompile_cache_disabled: bool) -> Self { + self.precompile_cache_disabled = precompile_cache_disabled; + self + } + + /// Setter for whether to use state root fallback, useful for testing. + pub const fn with_state_root_fallback(mut self, state_root_fallback: bool) -> Self { + self.state_root_fallback = state_root_fallback; self } diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index d8165bed1c8..14a5a138014 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -3,6 +3,7 @@ use crate::ForkchoiceStatus; use alloc::boxed::Box; use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumHash; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use core::{ @@ -20,6 +21,8 @@ pub enum BeaconConsensusEngineEvent { ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. ForkBlockAdded(ExecutedBlockWithTrieUpdates, Duration), + /// A new block was received from the consensus engine + BlockReceived(BlockNumHash), /// A block was added to the canonical chain, and the elapsed time validating the block CanonicalBlockAdded(ExecutedBlockWithTrieUpdates, Duration), /// A canonical chain was committed, and the elapsed time committing the data @@ -69,6 +72,9 @@ where Self::LiveSyncProgress(progress) => { write!(f, "LiveSyncProgress({progress:?})") } + Self::BlockReceived(num_hash) => { + write!(f, "BlockReceived({num_hash:?})") + } } } } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 1d690e28624..df164b461d5 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -89,6 +89,7 @@ reth-testing-utils.workspace = true reth-tracing.workspace = true reth-trie-db.workspace = true reth-node-ethereum.workspace = true +reth-e2e-test-utils.workspace = true # alloy alloy-rlp.workspace = true @@ -96,6 +97,8 @@ revm-state.workspace = true assert_matches.workspace = true criterion.workspace = true +eyre.workspace = true +serde_json.workspace = true crossbeam-channel.workspace = true proptest.workspace = true rand.workspace = true diff --git a/crates/engine/tree/src/tree/e2e_tests.rs b/crates/engine/tree/src/tree/e2e_tests.rs new file mode 100644 index 00000000000..fbadae28698 --- /dev/null +++ b/crates/engine/tree/src/tree/e2e_tests.rs @@ -0,0 +1,153 @@ +//! E2E test implementations using the e2e test framework for engine tree functionality. + +use crate::tree::TreeConfig; +use eyre::Result; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::testsuite::{ + actions::{ + CaptureBlock, CreateFork, ExpectFcuStatus, MakeCanonical, ProduceBlocks, + ProduceInvalidBlocks, ReorgTo, ValidateCanonicalTag, + }, + setup::{NetworkSetup, Setup}, + TestBuilder, +}; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_node_ethereum::EthereumNode; +use std::sync::Arc; + +/// Creates the standard setup for engine tree e2e tests. 
+fn default_engine_tree_setup() -> Setup<EthEngineTypes> {
+    Setup::default()
+        .with_chain_spec(Arc::new(
+            ChainSpecBuilder::default()
+                .chain(MAINNET.chain)
+                .genesis(
+                    serde_json::from_str(include_str!(
+                        "../../../../e2e-test-utils/src/testsuite/assets/genesis.json"
+                    ))
+                    .unwrap(),
+                )
+                .cancun_activated()
+                .build(),
+        ))
+        .with_network(NetworkSetup::single_node())
+        .with_tree_config(
+            TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true),
+        )
+}
+
+/// Test that verifies forkchoice update and canonical chain insertion functionality.
+#[tokio::test]
+async fn test_engine_tree_fcu_canon_chain_insertion_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // produce one block
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(1))
+        // make it canonical via forkchoice update
+        .with_action(MakeCanonical::new())
+        // extend with 3 more blocks
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(3))
+        // make the latest block canonical
+        .with_action(MakeCanonical::new());
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
+
+/// Test that verifies forkchoice update with a reorg where all blocks are already available.
+#[tokio::test]
+async fn test_engine_tree_fcu_reorg_with_all_blocks_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // create a main chain with 5 blocks (blocks 0-4)
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(5))
+        .with_action(MakeCanonical::new())
+        // create a fork from block 2 with 3 additional blocks
+        .with_action(CreateFork::<EthEngineTypes>::new(2, 3))
+        .with_action(CaptureBlock::new("fork_tip"))
+        // perform FCU to the fork tip - this should make the fork canonical
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("fork_tip"));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
+
+/// Test that verifies valid forks with an older canonical head.
+///
+/// This test creates two competing fork chains starting from a common ancestor,
+/// then switches between them using forkchoice updates, verifying that the engine
+/// correctly handles chains where the canonical head is older than fork tips.
+#[tokio::test]
+async fn test_engine_tree_valid_forks_with_older_canonical_head_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // create base chain with 1 block (this will be our old head)
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(1))
+        .with_action(CaptureBlock::new("old_head"))
+        .with_action(MakeCanonical::new())
+        // extend base chain with 5 more blocks to establish a fork point
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(5))
+        .with_action(CaptureBlock::new("fork_point"))
+        .with_action(MakeCanonical::new())
+        // revert to old head to simulate scenario where canonical head is older
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("old_head"))
+        // create first competing chain (chain A) from fork point with 10 blocks
+        .with_action(CreateFork::<EthEngineTypes>::new_from_tag("fork_point", 10))
+        .with_action(CaptureBlock::new("chain_a_tip"))
+        // create second competing chain (chain B) from same fork point with 10 blocks
+        .with_action(CreateFork::<EthEngineTypes>::new_from_tag("fork_point", 10))
+        .with_action(CaptureBlock::new("chain_b_tip"))
+        // switch to chain B via forkchoice update - this should become canonical
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("chain_b_tip"));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
+
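Since `Setup::with_tree_config` forwards a full `TreeConfig` to every node, the same hook can exercise the other knobs introduced in this diff. A hedged sketch using only setters that appear elsewhere in this diff (whether a given test wants them enabled is situational):

```rust
let tree_config = TreeConfig::default()
    // new in this diff: fall back to the synchronous state root path, useful for tests
    .with_state_root_fallback(true)
    // new in this diff: rebuild payloads even when the FCU head is already canonical
    .with_always_process_payload_attributes_on_canonical_head(true)
    // note the flipped polarity: the setter takes "disabled", so `true` turns the cache off
    .without_precompile_cache(true);
```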
+/// Test that verifies valid and invalid forks with an older canonical head.
+#[tokio::test]
+async fn test_engine_tree_valid_and_invalid_forks_with_older_canonical_head_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // create base chain with 1 block (old head)
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(1))
+        .with_action(CaptureBlock::new("old_head"))
+        .with_action(MakeCanonical::new())
+        // extend base chain with 5 more blocks to establish fork point
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(5))
+        .with_action(CaptureBlock::new("fork_point"))
+        .with_action(MakeCanonical::new())
+        // revert to old head to simulate older canonical head scenario
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("old_head"))
+        // create chain B (the valid chain) from fork point with 10 blocks
+        .with_action(CreateFork::<EthEngineTypes>::new_from_tag("fork_point", 10))
+        .with_action(CaptureBlock::new("chain_b_tip"))
+        // make chain B canonical via FCU - this becomes the valid chain
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("chain_b_tip"))
+        // create chain A (competing chain) - first produce valid blocks, then test invalid
+        // scenario
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("fork_point"))
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(10))
+        .with_action(CaptureBlock::new("chain_a_tip"))
+        // test that FCU to chain A tip returns VALID status (it's a valid competing chain)
+        .with_action(ExpectFcuStatus::valid("chain_a_tip"))
+        // attempt to produce invalid blocks (which should be rejected)
+        .with_action(ProduceInvalidBlocks::<EthEngineTypes>::with_invalid_at(3, 2))
+        // chain B remains the canonical chain
+        .with_action(ValidateCanonicalTag::new("chain_b_tip"));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
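The invalid-fork step above relies on `ProduceInvalidBlocks::with_invalid_at(n, i)`; read from its usage here, the assumed semantics are "produce `n` blocks, corrupting the block at index `i`". A minimal hypothetical smoke test in the same style (`invalid_block_smoke` is not part of this diff):

```rust
#[tokio::test]
async fn invalid_block_smoke() -> Result<()> {
    // Assumed semantics per the usage above: produce 2 blocks with the block
    // at index 0 made invalid; the engine should reject it and keep the head.
    let test = TestBuilder::new()
        .with_setup(default_engine_tree_setup())
        .with_action(ProduceBlocks::<EthEngineTypes>::new(2))
        .with_action(MakeCanonical::new())
        .with_action(ProduceInvalidBlocks::<EthEngineTypes>::with_invalid_at(2, 0));
    test.run::<EthereumNode>().await
}
```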
+ #[error("Missing ancestor with hash {0}")] + MissingAncestor(B256), } #[derive(thiserror::Error)] diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index dbb460aeca8..7b8454175ec 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -20,10 +20,10 @@ use payload_processor::sparse_trie::StateRootComputeOutcome; use persistence_state::CurrentPersistenceAction; use precompile_cache::{CachedPrecompile, PrecompileCacheMap}; use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, + CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_consensus::{Consensus, FullConsensus, HeaderValidator}; +use reth_consensus::{Consensus, FullConsensus}; pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, EngineValidator, @@ -33,7 +33,9 @@ use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::{ConfigureEvm, Evm, SpecFor}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{EngineApiMessageVersion, PayloadBuilderAttributes, PayloadTypes}; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{ + Block, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, +}; use reth_provider::{ providers::ConsistentDbView, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, @@ -47,6 +49,7 @@ use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use state::TreeState; use std::{ + borrow::Cow, fmt::Debug, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -62,6 +65,8 @@ use tracing::*; mod block_buffer; mod cached_state; +#[cfg(test)] +mod e2e_tests; pub mod error; mod instrumented_state; mod invalid_block_hook; @@ -70,6 +75,8 @@ mod metrics; mod payload_processor; mod persistence_state; pub mod precompile_cache; +#[cfg(test)] +mod tests; // TODO(alexey): compare trie updates in `insert_block_inner` #[expect(unused)] mod trie_updates; @@ -564,6 +571,10 @@ where } }; + let num_hash = block.num_hash(); + let engine_event = BeaconConsensusEngineEvent::BlockReceived(num_hash); + self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); + let block_hash = block.hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { @@ -632,6 +643,7 @@ where fn on_new_head(&self, new_head: B256) -> ProviderResult>> { // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { + debug!(target: "engine::tree", new_head=?new_head, "New head block not found in inmemory tree state"); return Ok(None) }; @@ -724,11 +736,16 @@ where /// extension of the canonical chain. /// * walking back from the current head to verify that the target hash is not already part of /// the canonical chain. - fn is_fork(&self, target_hash: B256) -> ProviderResult { + /// + /// The header is required as an arg, because we might be checking that the header is a fork + /// block before it's in the tree state and before it's in the database. 
@@ -724,11 +736,16 @@
     /// extension of the canonical chain.
     /// * walking back from the current head to verify that the target hash is not already part of
     ///   the canonical chain.
-    fn is_fork(&self, target_hash: B256) -> ProviderResult<bool> {
+    ///
+    /// The header is required as an arg, because we might be checking that the header is a fork
+    /// block before it's in the tree state and before it's in the database.
+    fn is_fork(&self, target_header: &SealedHeader<N::BlockHeader>) -> ProviderResult<bool> {
+        let target_hash = target_header.hash();
         // verify that the given hash is not part of an extension of the canon chain.
         let canonical_head = self.state.tree_state.canonical_head();
-        let mut current_hash = target_hash;
-        while let Some(current_block) = self.sealed_header_by_hash(current_hash)? {
+        let mut current_hash;
+        let mut current_block = Cow::Borrowed(target_header);
+        loop {
             if current_block.hash() == canonical_head.hash {
                 return Ok(false)
             }
@@ -737,6 +754,9 @@ where
                 break
             }
             current_hash = current_block.parent_hash();
+
+            let Some(next_block) = self.sealed_header_by_hash(current_hash)? else { break };
+            current_block = Cow::Owned(next_block);
         }
 
         // verify that the given hash is not already part of canonical chain stored in memory
@@ -752,6 +772,26 @@ where
         Ok(true)
     }
 
+    /// Check if the given block has any ancestors with missing trie updates.
+    fn has_ancestors_with_missing_trie_updates(
+        &self,
+        target_header: &SealedHeader<N::BlockHeader>,
+    ) -> bool {
+        // Walk back through the chain starting from the parent of the target block
+        let mut current_hash = target_header.parent_hash();
+        while let Some(block) = self.state.tree_state.blocks_by_hash.get(&current_hash) {
+            // Check if this block is missing trie updates
+            if block.trie.is_missing() {
+                return true;
+            }
+
+            // Move to the parent block
+            current_hash = block.recovered_block().parent_hash();
+        }
+
+        false
+    }
+
     /// Returns the persisting kind for the input block.
     fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind {
         // Check that we're currently persisting.
@@ -850,7 +890,10 @@
         // For OpStack the proposers are allowed to reorg their own chain at will, so we need to
         // always trigger a new payload job if requested.
-        if self.engine_kind.is_opstack() {
+        // Also allow forcing this behavior via a config flag.
+        if self.engine_kind.is_opstack() ||
+            self.config.always_process_payload_attributes_on_canonical_head()
+        {
             if let Some(attr) = attrs {
                 debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head");
                 let updated =
@@ -1018,7 +1061,7 @@
         if let Some(new_tip_num) = self.find_disk_reorg()? {
             self.remove_blocks(new_tip_num)
         } else if self.should_persist() {
-            let blocks_to_persist = self.get_canonical_blocks_to_persist();
+            let blocks_to_persist = self.get_canonical_blocks_to_persist()?;
             self.persist_blocks(blocks_to_persist);
         }
     }
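This is the consumer side of the `ExecutedTrieUpdates::Missing` story from `error.rs` above: fork blocks built on historical database state carry no trie updates until persistence recomputes them (see `get_canonical_blocks_to_persist` just below). A hedged sketch of handling the two states (`ready_trie_updates` is a hypothetical helper; the variant payloads follow the usage in this diff):

```rust
use std::sync::Arc;

use reth_chain_state::{ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates};
use reth_ethereum_primitives::EthPrimitives;
use reth_trie::updates::TrieUpdates;

/// Hypothetical helper: trie updates if already computed, `None` for fork
/// blocks whose updates will be recomputed at persistence time.
fn ready_trie_updates(
    executed: &ExecutedBlockWithTrieUpdates<EthPrimitives>,
) -> Option<Arc<TrieUpdates>> {
    match &executed.trie {
        ExecutedTrieUpdates::Present(updates) => Some(updates.clone()),
        ExecutedTrieUpdates::Missing => None,
    }
}
```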
@@ -1345,9 +1388,20 @@
     }
 
     /// Returns a batch of consecutive canonical blocks to persist in the range
-    /// `(last_persisted_number .. canonical_head - threshold]` . The expected
+    /// `(last_persisted_number .. canonical_head - threshold]`. The expected
     /// order is oldest -> newest.
-    fn get_canonical_blocks_to_persist(&self) -> Vec<ExecutedBlockWithTrieUpdates<N::Primitives>> {
+    ///
+    /// For those blocks that didn't have the trie updates calculated, runs the state root
+    /// calculation, and saves the trie updates.
+    ///
+    /// Returns an error if the state root calculation fails.
+    fn get_canonical_blocks_to_persist(
+        &mut self,
+    ) -> Result<Vec<ExecutedBlockWithTrieUpdates<N::Primitives>>, AdvancePersistenceError> {
+        // We will calculate the state root using the database, so we need to be sure that no
+        // persistence is currently in progress
+        debug_assert!(!self.persistence_state.in_progress());
+
         let mut blocks_to_persist = Vec::new();
         let mut current_hash = self.state.tree_state.canonical_block_hash();
         let last_persisted_number = self.persistence_state.last_persisted_block.number;
@@ -1370,10 +1424,51 @@
             current_hash = block.recovered_block().parent_hash();
         }
 
-        // reverse the order so that the oldest block comes first
+        // Reverse the order so that the oldest block comes first
         blocks_to_persist.reverse();
 
-        blocks_to_persist
+        // Calculate missing trie updates
+        for block in &mut blocks_to_persist {
+            if block.trie.is_present() {
+                continue
+            }
+
+            debug!(
+                target: "engine::tree",
+                block = ?block.recovered_block().num_hash(),
+                "Calculating trie updates before persisting"
+            );
+
+            let provider = self
+                .state_provider_builder(block.recovered_block().parent_hash())?
+                .ok_or(AdvancePersistenceError::MissingAncestor(
+                    block.recovered_block().parent_hash(),
+                ))?
+                .build()?;
+
+            let mut trie_input = self.compute_trie_input(
+                self.persisting_kind_for(block.recovered_block().header()),
+                self.provider.database_provider_ro()?,
+                block.recovered_block().parent_hash(),
+            )?;
+            // Extend with block we are generating trie updates for.
+            trie_input.append_ref(block.hashed_state());
+            let (_root, updates) = provider.state_root_from_nodes_with_updates(trie_input)?;
+            debug_assert_eq!(_root, block.recovered_block().state_root());
+
+            // Update trie updates in both tree state and blocks to persist that we return
+            let trie_updates = Arc::new(updates);
+            let tree_state_block = self
+                .state
+                .tree_state
+                .blocks_by_hash
+                .get_mut(&block.recovered_block().hash())
+                .expect("blocks to persist are constructed from tree state blocks");
+            tree_state_block.trie.set_present(trie_updates.clone());
+            block.trie.set_present(trie_updates);
+        }
+
+        Ok(blocks_to_persist)
     }
 
     /// This clears the blocks from the in-memory tree state that have been persisted to the
@@ -1825,7 +1920,10 @@
                     .persisted_trie_updates
                     .get(&block.recovered_block.hash())
                     .cloned()?;
-                Some(ExecutedBlockWithTrieUpdates { block: block.clone(), trie })
+                Some(ExecutedBlockWithTrieUpdates {
+                    block: block.clone(),
+                    trie: ExecutedTrieUpdates::Present(trie),
+                })
             })
             .collect::<Vec<_>>();
         self.reinsert_reorged_blocks(old);
@@ -2074,12 +2172,25 @@
         //
         // See https://github.com/paradigmxyz/reth/issues/12688 for more details
         let persisting_kind = self.persisting_kind_for(block.header());
-        let run_parallel_state_root = persisting_kind.can_run_parallel_state_root();
+        // don't run parallel if state root fallback is set
+        let run_parallel_state_root =
+            persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback();
+
+        // Use state root task only if:
+        // 1. No persistence is in progress
+        // 2. Config allows it
+        // 3. No ancestors with missing trie updates. If any exist, it will mean that every state
+        //    root task proof calculation will include a lot of unrelated paths in the prefix sets.
+        //    It's cheaper to run a parallel state root that does one walk over trie tables while
+        //    accounting for the prefix sets.
+ let mut use_state_root_task = run_parallel_state_root && + self.config.use_state_root_task() && + !self.has_ancestors_with_missing_trie_updates(block.sealed_header()); // use prewarming background task let header = block.clone_sealed_header(); let txs = block.clone_transactions_recovered().collect(); - let mut handle = if run_parallel_state_root && self.config.use_state_root_task() { + let mut handle = if use_state_root_task { // use background tasks for state root calc let consistent_view = ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); @@ -2088,7 +2199,7 @@ where let trie_input_start = Instant::now(); let res = self.compute_trie_input( persisting_kind, - consistent_view.clone(), + ensure_ok!(consistent_view.provider_ro()), block.header().parent_hash(), ); let trie_input = match res { @@ -2101,14 +2212,22 @@ where .trie_input_duration .record(trie_input_start.elapsed().as_secs_f64()); - self.payload_processor.spawn( - header, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ) + // Use state root task only if prefix sets are empty, otherwise proof generation is too + // expensive because it requires walking over the paths in the prefix set in every + // proof. + if trie_input.prefix_sets.is_empty() { + self.payload_processor.spawn( + header, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) + } else { + use_state_root_task = false; + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + } } else { self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) }; @@ -2162,7 +2281,7 @@ where if run_parallel_state_root { // if we new payload extends the current canonical change we attempt to use the // background task or try to compute it in parallel - if self.config.use_state_root_task() { + if use_state_root_task { match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = execution_finish.elapsed(); @@ -2212,8 +2331,13 @@ where maybe_state_root } else { // fallback is to compute the state root regularly in sync - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); - self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + if self.config.state_root_fallback() { + debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + } else { + warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + } + let (root, updates) = ensure_ok!(state_provider.state_root_with_updates(hashed_state.clone())); (root, updates, root_time.elapsed()) @@ -2223,28 +2347,37 @@ where debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); // ensure state root matches - self.consensus - .validate_state_root(block.header(), state_root) - .inspect_err(|_| { - self.on_invalid_block( - &parent_block, - &block, - &output, - Some((&trie_output, state_root)), - ); - }) - .map_err(|err| (err.into(), block.clone()))?; + if state_root != block.header().state_root() { + // call post-block hook + self.on_invalid_block(&parent_block, &block, &output, Some((&trie_output, state_root))); + return Err(( + ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.header().state_root() }.into(), + ) + .into(), + block, + )) + } // terminate 
prewarming task with good state output handle.terminate_caching(Some(output.state.clone())); + let is_fork = ensure_ok!(self.is_fork(block.sealed_header())); + + // If the block is a fork, we don't save the trie updates, because they may be incorrect. + // Instead, they will be recomputed on persistence. + let trie_updates = if is_fork { + ExecutedTrieUpdates::Missing + } else { + ExecutedTrieUpdates::Present(Arc::new(trie_output)) + }; let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { block: ExecutedBlock { recovered_block: Arc::new(block), execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), hashed_state: Arc::new(hashed_state), }, - trie: Arc::new(trie_output), + trie: trie_updates, }; // if the parent is the canonical head, we can insert the block as the pending block @@ -2259,10 +2392,6 @@ where // emit insert event let elapsed = start.elapsed(); - let is_fork = match self.is_fork(block_num_hash.hash) { - Ok(val) => val, - Err(e) => return Err((e.into(), executed.block.recovered_block().clone())), - }; let engine_event = if is_fork { BeaconConsensusEngineEvent::ForkBlockAdded(executed, elapsed) } else { @@ -2289,7 +2418,7 @@ where .build(); let mut executor = self.evm_config.executor_for_block(&mut db, block); - if self.config.precompile_cache_enabled() { + if !self.config.precompile_cache_disabled() { executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { CachedPrecompile::wrap( precompile, @@ -2328,7 +2457,7 @@ where let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; let mut input = - self.compute_trie_input(persisting_kind, consistent_view.clone(), parent_hash)?; + self.compute_trie_input(persisting_kind, consistent_view.provider_ro()?, parent_hash)?; // Extend with block we are validating root for. input.append_ref(hashed_state); @@ -2350,15 +2479,14 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. - fn compute_trie_input( + fn compute_trie_input( &self, persisting_kind: PersistingKind, - consistent_view: ConsistentDbView

, + provider: TP, parent_hash: B256, - ) -> Result { + ) -> ProviderResult { let mut input = TrieInput::default(); - let provider = consistent_view.provider_ro()?; let best_block_number = provider.best_block_number()?; let (mut historical, mut blocks) = self @@ -2429,9 +2557,9 @@ where input.append(revert_state); // Extend with contents of parent in-memory blocks. - for block in blocks.iter().rev() { - input.append_cached_ref(block.trie_updates(), block.hashed_state()) - } + input.extend_with_blocks( + blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), + ); Ok(input) } @@ -2774,1415 +2902,3 @@ impl PersistingKind { matches!(self, Self::PersistingDescendant) } } -#[cfg(test)] -mod tests { - use super::*; - use crate::persistence::PersistenceAction; - use alloy_consensus::Header; - use alloy_primitives::{ - map::{HashMap, HashSet}, - Bytes, B256, - }; - use alloy_rlp::Decodable; - use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV3, - }; - use assert_matches::assert_matches; - use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; - use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; - use reth_engine_primitives::ForkchoiceStatus; - use reth_ethereum_consensus::EthBeaconConsensus; - use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_ethereum_primitives::{Block, EthPrimitives}; - use reth_evm_ethereum::MockEvmConfig; - use reth_node_ethereum::EthereumEngineValidator; - use reth_primitives_traits::Block as _; - use reth_provider::test_utils::MockEthProvider; - use reth_trie::{updates::TrieUpdates, HashedPostState}; - use std::{ - collections::BTreeMap, - str::FromStr, - sync::mpsc::{channel, Sender}, - }; - - /// This is a test channel that allows you to `release` any value that is in the channel. - /// - /// If nothing has been sent, then the next value will be immediately sent. 
- struct TestChannel { - /// If an item is sent to this channel, an item will be released in the wrapped channel - release: Receiver<()>, - /// The sender channel - tx: Sender, - /// The receiver channel - rx: Receiver, - } - - impl TestChannel { - /// Creates a new test channel - fn spawn_channel() -> (Sender, Receiver, TestChannelHandle) { - let (original_tx, original_rx) = channel(); - let (wrapped_tx, wrapped_rx) = channel(); - let (release_tx, release_rx) = channel(); - let handle = TestChannelHandle::new(release_tx); - let test_channel = Self { release: release_rx, tx: wrapped_tx, rx: original_rx }; - // spawn the task that listens and releases stuff - std::thread::spawn(move || test_channel.intercept_loop()); - (original_tx, wrapped_rx, handle) - } - - /// Runs the intercept loop, waiting for the handle to release a value - fn intercept_loop(&self) { - while self.release.recv() == Ok(()) { - let Ok(value) = self.rx.recv() else { return }; - - let _ = self.tx.send(value); - } - } - } - - struct TestChannelHandle { - /// The sender to use for releasing values - release: Sender<()>, - } - - impl TestChannelHandle { - /// Returns a [`TestChannelHandle`] - const fn new(release: Sender<()>) -> Self { - Self { release } - } - - /// Signals to the channel task that a value should be released - #[expect(dead_code)] - fn release(&self) { - let _ = self.release.send(()); - } - } - - struct TestHarness { - tree: EngineApiTreeHandler< - EthPrimitives, - MockEthProvider, - EthEngineTypes, - EthereumEngineValidator, - MockEvmConfig, - >, - to_tree_tx: Sender, Block>>, - from_tree_rx: UnboundedReceiver, - blocks: Vec, - action_rx: Receiver, - evm_config: MockEvmConfig, - block_builder: TestBlockBuilder, - provider: MockEthProvider, - } - - impl TestHarness { - fn new(chain_spec: Arc) -> Self { - let (action_tx, action_rx) = channel(); - Self::with_persistence_channel(chain_spec, action_tx, action_rx) - } - - #[expect(dead_code)] - fn with_test_channel(chain_spec: Arc) -> (Self, TestChannelHandle) { - let (action_tx, action_rx, handle) = TestChannel::spawn_channel(); - (Self::with_persistence_channel(chain_spec, action_tx, action_rx), handle) - } - - fn with_persistence_channel( - chain_spec: Arc, - action_tx: Sender, - action_rx: Receiver, - ) -> Self { - let persistence_handle = PersistenceHandle::new(action_tx); - - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); - - let provider = MockEthProvider::default(); - - let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); - - let (from_tree_tx, from_tree_rx) = unbounded_channel(); - - let header = chain_spec.genesis_header().clone(); - let header = SealedHeader::seal_slow(header); - let engine_api_tree_state = - EngineApiTreeState::new(10, 10, header.num_hash(), EngineApiKind::Ethereum); - let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); - - let (to_payload_service, _payload_command_rx) = unbounded_channel(); - let payload_builder = PayloadBuilderHandle::new(to_payload_service); - - let evm_config = MockEvmConfig::default(); - - let tree = EngineApiTreeHandler::new( - provider.clone(), - consensus, - payload_validator, - from_tree_tx, - engine_api_tree_state, - canonical_in_memory_state, - persistence_handle, - PersistenceState::default(), - payload_builder, - // TODO: fix tests for state root task https://github.com/paradigmxyz/reth/issues/14376 - // always assume enough parallelism for tests - TreeConfig::default() - .with_legacy_state_root(true) - 
.with_has_enough_parallelism(true), - EngineApiKind::Ethereum, - evm_config.clone(), - ); - - let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone()); - Self { - to_tree_tx: tree.incoming_tx.clone(), - tree, - from_tree_rx, - blocks: vec![], - action_rx, - evm_config, - block_builder, - provider, - } - } - - fn with_blocks(mut self, blocks: Vec) -> Self { - let mut blocks_by_hash = HashMap::default(); - let mut blocks_by_number = BTreeMap::new(); - let mut state_by_hash = HashMap::default(); - let mut hash_by_number = BTreeMap::new(); - let mut parent_to_child: HashMap> = HashMap::default(); - let mut parent_hash = B256::ZERO; - - for block in &blocks { - let sealed_block = block.recovered_block(); - let hash = sealed_block.hash(); - let number = sealed_block.number; - blocks_by_hash.insert(hash, block.clone()); - blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone()); - state_by_hash.insert(hash, Arc::new(BlockState::new(block.clone()))); - hash_by_number.insert(number, hash); - parent_to_child.entry(parent_hash).or_default().insert(hash); - parent_hash = hash; - } - - self.tree.state.tree_state = TreeState { - blocks_by_hash, - blocks_by_number, - current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), - parent_to_child, - persisted_trie_updates: HashMap::default(), - engine_kind: EngineApiKind::Ethereum, - }; - - let last_executed_block = blocks.last().unwrap().clone(); - let pending = Some(BlockState::new(last_executed_block)); - self.tree.canonical_in_memory_state = - CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending, None, None); - - self.blocks = blocks.clone(); - - let recovered_blocks = - blocks.iter().map(|b| b.recovered_block().clone()).collect::>(); - - self.persist_blocks(recovered_blocks); - - self - } - - const fn with_backfill_state(mut self, state: BackfillSyncState) -> Self { - self.tree.backfill_sync_state = state; - self - } - - fn extend_execution_outcome( - &self, - execution_outcomes: impl IntoIterator>, - ) { - self.evm_config.extend(execution_outcomes); - } - - fn insert_block( - &mut self, - block: RecoveredBlock, - ) -> Result> { - let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); - self.extend_execution_outcome([execution_outcome]); - self.tree.provider.add_state_root(block.state_root); - self.tree.insert_block(block) - } - - async fn fcu_to(&mut self, block_hash: B256, fcu_status: impl Into) { - let fcu_status = fcu_status.into(); - - self.send_fcu(block_hash, fcu_status).await; - - self.check_fcu(block_hash, fcu_status).await; - } - - async fn send_fcu(&mut self, block_hash: B256, fcu_status: impl Into) { - let fcu_state = self.fcu_state(block_hash); - - let (tx, rx) = oneshot::channel(); - self.tree - .on_engine_message(FromEngine::Request( - BeaconEngineMessage::ForkchoiceUpdated { - state: fcu_state, - payload_attrs: None, - tx, - version: EngineApiMessageVersion::default(), - } - .into(), - )) - .unwrap(); - - let response = rx.await.unwrap().unwrap().await.unwrap(); - match fcu_status.into() { - ForkchoiceStatus::Valid => assert!(response.payload_status.is_valid()), - ForkchoiceStatus::Syncing => assert!(response.payload_status.is_syncing()), - ForkchoiceStatus::Invalid => assert!(response.payload_status.is_invalid()), - } - } - - async fn check_fcu(&mut self, block_hash: B256, fcu_status: impl Into) { - let fcu_state = self.fcu_state(block_hash); - - // check for ForkchoiceUpdated event - let event = 
self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::ForkchoiceUpdated( - state, - status, - )) => { - assert_eq!(state, fcu_state); - assert_eq!(status, fcu_status.into()); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - const fn fcu_state(&self, block_hash: B256) -> ForkchoiceState { - ForkchoiceState { - head_block_hash: block_hash, - safe_block_hash: block_hash, - finalized_block_hash: block_hash, - } - } - - async fn send_new_payload( - &mut self, - block: RecoveredBlock, - ) { - let payload = ExecutionPayloadV3::from_block_unchecked( - block.hash(), - &block.clone_sealed_block().into_block(), - ); - self.tree - .on_new_payload(ExecutionData { - payload: payload.into(), - sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: vec![], - }), - }) - .unwrap(); - } - - async fn insert_chain( - &mut self, - chain: impl IntoIterator> + Clone, - ) { - for block in chain.clone() { - self.insert_block(block.clone()).unwrap(); - } - self.check_canon_chain_insertion(chain).await; - } - - async fn check_canon_commit(&mut self, hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus( - BeaconConsensusEngineEvent::CanonicalChainCommitted(header, _), - ) => { - assert_eq!(header.hash(), hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - async fn check_fork_chain_insertion( - &mut self, - chain: impl IntoIterator> + Clone, - ) { - for block in chain { - self.check_fork_block_added(block.hash()).await; - } - } - - async fn check_canon_chain_insertion( - &mut self, - chain: impl IntoIterator> + Clone, - ) { - for block in chain.clone() { - self.check_canon_block_added(block.hash()).await; - } - } - - async fn check_canon_block_added(&mut self, expected_hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus( - BeaconConsensusEngineEvent::CanonicalBlockAdded(executed, _), - ) => { - assert_eq!(executed.recovered_block.hash(), expected_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - async fn check_fork_block_added(&mut self, expected_hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::ForkBlockAdded( - executed, - _, - )) => { - assert_eq!(executed.recovered_block.hash(), expected_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - async fn check_invalid_block(&mut self, expected_hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::InvalidBlock( - block, - )) => { - assert_eq!(block.hash(), expected_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - fn persist_blocks(&self, blocks: Vec>) { - let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); - let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); - - for block in &blocks { - block_data.push((block.hash(), block.clone_block())); - headers_data.push((block.hash(), block.header().clone())); - } - - self.provider.extend_blocks(block_data); - self.provider.extend_headers(headers_data); - } - - fn setup_range_insertion_for_valid_chain( - &mut self, - chain: Vec>, - ) { - self.setup_range_insertion_for_chain(chain, None) - } - - fn 
setup_range_insertion_for_invalid_chain( - &mut self, - chain: Vec>, - index: usize, - ) { - self.setup_range_insertion_for_chain(chain, Some(index)) - } - - fn setup_range_insertion_for_chain( - &mut self, - chain: Vec>, - invalid_index: Option, - ) { - // setting up execution outcomes for the chain, the blocks will be - // executed starting from the oldest, so we need to reverse. - let mut chain_rev = chain; - chain_rev.reverse(); - - let mut execution_outcomes = Vec::with_capacity(chain_rev.len()); - for (index, block) in chain_rev.iter().enumerate() { - let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); - let state_root = if invalid_index.is_some() && invalid_index.unwrap() == index { - B256::random() - } else { - block.state_root - }; - self.tree.provider.add_state_root(state_root); - execution_outcomes.push(execution_outcome); - } - self.extend_execution_outcome(execution_outcomes); - } - - fn check_canon_head(&self, head_hash: B256) { - assert_eq!(self.tree.state.tree_state.canonical_head().hash, head_hash); - } - } - - #[test] - fn test_tree_persist_block_batch() { - let tree_config = TreeConfig::default(); - let chain_spec = MAINNET.clone(); - let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); - - // we need more than tree_config.persistence_threshold() +1 blocks to - // trigger the persistence task. - let blocks: Vec<_> = test_block_builder - .get_executed_blocks(1..tree_config.persistence_threshold() + 2) - .collect(); - let mut test_harness = TestHarness::new(chain_spec).with_blocks(blocks); - - let mut blocks = vec![]; - for idx in 0..tree_config.max_execute_block_batch_size() * 2 { - blocks.push(test_block_builder.generate_random_block(idx as u64, B256::random())); - } - - test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(blocks)).unwrap(); - - // process the message - let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); - test_harness.tree.on_engine_message(msg).unwrap(); - - // we now should receive the other batch - let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); - match msg { - FromEngine::DownloadedBlocks(blocks) => { - assert_eq!(blocks.len(), tree_config.max_execute_block_batch_size()); - } - _ => panic!("unexpected message: {msg:#?}"), - } - } - - #[tokio::test] - async fn test_tree_persist_blocks() { - let tree_config = TreeConfig::default(); - let chain_spec = MAINNET.clone(); - let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); - - // we need more than tree_config.persistence_threshold() +1 blocks to - // trigger the persistence task. - let blocks: Vec<_> = test_block_builder - .get_executed_blocks(1..tree_config.persistence_threshold() + 2) - .collect(); - let test_harness = TestHarness::new(chain_spec).with_blocks(blocks.clone()); - std::thread::Builder::new() - .name("Tree Task".to_string()) - .spawn(|| test_harness.tree.run()) - .unwrap(); - - // send a message to the tree to enter the main loop. 
- test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); - - let received_action = - test_harness.action_rx.recv().expect("Failed to receive save blocks action"); - if let PersistenceAction::SaveBlocks(saved_blocks, _) = received_action { - // only blocks.len() - tree_config.memory_block_buffer_target() will be - // persisted - let expected_persist_len = - blocks.len() - tree_config.memory_block_buffer_target() as usize; - assert_eq!(saved_blocks.len(), expected_persist_len); - assert_eq!(saved_blocks, blocks[..expected_persist_len]); - } else { - panic!("unexpected action received {received_action:?}"); - } - } - - #[tokio::test] - async fn test_in_memory_state_trait_impl() { - let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(0..10).collect(); - let test_harness = TestHarness::new(MAINNET.clone()).with_blocks(blocks.clone()); - - for executed_block in blocks { - let sealed_block = executed_block.recovered_block(); - - let expected_state = BlockState::new(executed_block.clone()); - - let actual_state_by_hash = test_harness - .tree - .canonical_in_memory_state - .state_by_hash(sealed_block.hash()) - .unwrap(); - assert_eq!(expected_state, *actual_state_by_hash); - - let actual_state_by_number = test_harness - .tree - .canonical_in_memory_state - .state_by_number(sealed_block.number) - .unwrap(); - assert_eq!(expected_state, *actual_state_by_number); - } - } - - #[tokio::test] - async fn test_engine_request_during_backfill() { - let tree_config = TreeConfig::default(); - let blocks: Vec<_> = TestBlockBuilder::eth() - .get_executed_blocks(0..tree_config.persistence_threshold()) - .collect(); - let mut test_harness = TestHarness::new(MAINNET.clone()) - .with_blocks(blocks) - .with_backfill_state(BackfillSyncState::Active); - - let (tx, rx) = oneshot::channel(); - test_harness - .tree - .on_engine_message(FromEngine::Request( - BeaconEngineMessage::ForkchoiceUpdated { - state: ForkchoiceState { - head_block_hash: B256::random(), - safe_block_hash: B256::random(), - finalized_block_hash: B256::random(), - }, - payload_attrs: None, - tx, - version: EngineApiMessageVersion::default(), - } - .into(), - )) - .unwrap(); - - let resp = rx.await.unwrap().unwrap().await.unwrap(); - assert!(resp.payload_status.is_syncing()); - } - - #[test] - fn test_disconnected_payload() { - let s = include_str!("../../test-data/holesky/2.rlp"); - let data = Bytes::from_str(s).unwrap(); - let block = Block::decode(&mut data.as_ref()).unwrap(); - let sealed = block.seal_slow(); - let hash = sealed.hash(); - let payload = ExecutionPayloadV1::from_block_unchecked(hash, &sealed.clone().into_block()); - - let mut test_harness = TestHarness::new(HOLESKY.clone()); - - let outcome = test_harness - .tree - .on_new_payload(ExecutionData { - payload: payload.into(), - sidecar: ExecutionPayloadSidecar::none(), - }) - .unwrap(); - assert!(outcome.outcome.is_syncing()); - - // ensure block is buffered - let buffered = test_harness.tree.state.buffer.block(&hash).unwrap(); - assert_eq!(buffered.clone_sealed_block(), sealed); - } - - #[test] - fn test_disconnected_block() { - let s = include_str!("../../test-data/holesky/2.rlp"); - let data = Bytes::from_str(s).unwrap(); - let block = Block::decode(&mut data.as_ref()).unwrap(); - let sealed = block.seal_slow().try_recover().unwrap(); - - let mut test_harness = TestHarness::new(HOLESKY.clone()); - - let outcome = test_harness.tree.insert_block(sealed.clone()).unwrap(); - assert_eq!( - outcome, - InsertPayloadOk::Inserted(BlockStatus::Disconnected { 
- head: test_harness.tree.state.tree_state.current_canonical_head, - missing_ancestor: sealed.parent_num_hash() - }) - ); - } - - #[tokio::test] - async fn test_holesky_payload() { - let s = include_str!("../../test-data/holesky/1.rlp"); - let data = Bytes::from_str(s).unwrap(); - let block: Block = Block::decode(&mut data.as_ref()).unwrap(); - let sealed = block.seal_slow(); - let payload = - ExecutionPayloadV1::from_block_unchecked(sealed.hash(), &sealed.clone().into_block()); - - let mut test_harness = - TestHarness::new(HOLESKY.clone()).with_backfill_state(BackfillSyncState::Active); - - let (tx, rx) = oneshot::channel(); - test_harness - .tree - .on_engine_message(FromEngine::Request( - BeaconEngineMessage::NewPayload { - payload: ExecutionData { - payload: payload.clone().into(), - sidecar: ExecutionPayloadSidecar::none(), - }, - tx, - } - .into(), - )) - .unwrap(); - - let resp = rx.await.unwrap().unwrap(); - assert!(resp.is_syncing()); - } - - #[tokio::test] - async fn test_tree_state_on_new_head_reorg() { - reth_tracing::init_test_tracing(); - let chain_spec = MAINNET.clone(); - - // Set persistence_threshold to 1 - let mut test_harness = TestHarness::new(chain_spec); - test_harness.tree.config = test_harness - .tree - .config - .with_persistence_threshold(1) - .with_memory_block_buffer_target(1); - let mut test_block_builder = TestBlockBuilder::eth(); - let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..6).collect(); - - for block in &blocks { - test_harness.tree.state.tree_state.insert_executed(block.clone()); - } - - // set block 3 as the current canonical head - test_harness - .tree - .state - .tree_state - .set_canonical_head(blocks[2].recovered_block().num_hash()); - - // create a fork from block 2 - let fork_block_3 = test_block_builder - .get_executed_block_with_number(3, blocks[1].recovered_block().hash()); - let fork_block_4 = test_block_builder - .get_executed_block_with_number(4, fork_block_3.recovered_block().hash()); - let fork_block_5 = test_block_builder - .get_executed_block_with_number(5, fork_block_4.recovered_block().hash()); - - test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone()); - test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone()); - test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone()); - - // normal (non-reorg) case - let result = test_harness.tree.on_new_head(blocks[4].recovered_block().hash()).unwrap(); - assert!(matches!(result, Some(NewCanonicalChain::Commit { .. 
}))); - if let Some(NewCanonicalChain::Commit { new }) = result { - assert_eq!(new.len(), 2); - assert_eq!(new[0].recovered_block().hash(), blocks[3].recovered_block().hash()); - assert_eq!(new[1].recovered_block().hash(), blocks[4].recovered_block().hash()); - } - - // should be a None persistence action before we advance persistence - let current_action = test_harness.tree.persistence_state.current_action(); - assert_eq!(current_action, None); - - // let's attempt to persist and check that it attempts to save blocks - // - // since in-memory block buffer target and persistence_threshold are both 1, this should - // save all but the current tip of the canonical chain (up to blocks[1]) - test_harness.tree.advance_persistence().unwrap(); - let current_action = test_harness.tree.persistence_state.current_action().cloned(); - assert_eq!( - current_action, - Some(CurrentPersistenceAction::SavingBlocks { - highest: blocks[1].recovered_block().num_hash() - }) - ); - - // get rid of the prev action - let received_action = test_harness.action_rx.recv().unwrap(); - let PersistenceAction::SaveBlocks(saved_blocks, sender) = received_action else { - panic!("received wrong action"); - }; - assert_eq!(saved_blocks, vec![blocks[0].clone(), blocks[1].clone()]); - - // send the response so we can advance again - sender.send(Some(blocks[1].recovered_block().num_hash())).unwrap(); - - // we should be persisting blocks[1] because we threw out the prev action - let current_action = test_harness.tree.persistence_state.current_action().cloned(); - assert_eq!( - current_action, - Some(CurrentPersistenceAction::SavingBlocks { - highest: blocks[1].recovered_block().num_hash() - }) - ); - - // after advancing persistence, we should be at `None` for the next action - test_harness.tree.advance_persistence().unwrap(); - let current_action = test_harness.tree.persistence_state.current_action().cloned(); - assert_eq!(current_action, None); - - // reorg case - let result = test_harness.tree.on_new_head(fork_block_5.recovered_block().hash()).unwrap(); - assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. 
}))); - - if let Some(NewCanonicalChain::Reorg { new, old }) = result { - assert_eq!(new.len(), 3); - assert_eq!(new[0].recovered_block().hash(), fork_block_3.recovered_block().hash()); - assert_eq!(new[1].recovered_block().hash(), fork_block_4.recovered_block().hash()); - assert_eq!(new[2].recovered_block().hash(), fork_block_5.recovered_block().hash()); - - assert_eq!(old.len(), 1); - assert_eq!(old[0].recovered_block().hash(), blocks[2].recovered_block().hash()); - } - - // The canonical block has not changed, so we will not get any active persistence action - test_harness.tree.advance_persistence().unwrap(); - let current_action = test_harness.tree.persistence_state.current_action().cloned(); - assert_eq!(current_action, None); - - // Let's change the canonical head and advance persistence - test_harness - .tree - .state - .tree_state - .set_canonical_head(fork_block_5.recovered_block().num_hash()); - - // The canonical block has changed now, we should get fork_block_4 due to the persistence - // threshold and in memory block buffer target - test_harness.tree.advance_persistence().unwrap(); - let current_action = test_harness.tree.persistence_state.current_action().cloned(); - assert_eq!( - current_action, - Some(CurrentPersistenceAction::SavingBlocks { - highest: fork_block_4.recovered_block().num_hash() - }) - ); - } - - #[test] - fn test_tree_state_on_new_head_deep_fork() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec); - let mut test_block_builder = TestBlockBuilder::eth(); - - let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect(); - - for block in &blocks { - test_harness.tree.state.tree_state.insert_executed(block.clone()); - } - - // set last block as the current canonical head - let last_block = blocks.last().unwrap().recovered_block().clone(); - - test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash()); - - // create a fork chain from last_block - let chain_a = test_block_builder.create_fork(&last_block, 10); - let chain_b = test_block_builder.create_fork(&last_block, 10); - - for block in &chain_a { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: Arc::new(TrieUpdates::default()), - }); - } - test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); - - for block in &chain_b { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: Arc::new(TrieUpdates::default()), - }); - } - - // for each block in chain_b, reorg to it and then back to canonical - let mut expected_new = Vec::new(); - for block in &chain_b { - // reorg to chain from block b - let result = test_harness.tree.on_new_head(block.hash()).unwrap(); - assert_matches!(result, Some(NewCanonicalChain::Reorg { .. 
})); - - expected_new.push(block); - if let Some(NewCanonicalChain::Reorg { new, old }) = result { - assert_eq!(new.len(), expected_new.len()); - for (index, block) in expected_new.iter().enumerate() { - assert_eq!(new[index].recovered_block().hash(), block.hash()); - } - - assert_eq!(old.len(), chain_a.len()); - for (index, block) in chain_a.iter().enumerate() { - assert_eq!(old[index].recovered_block().hash(), block.hash()); - } - } - - // set last block of chain a as canonical head - test_harness.tree.on_new_head(chain_a.last().unwrap().hash()).unwrap(); - } - } - - #[tokio::test] - async fn test_get_canonical_blocks_to_persist() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec); - let mut test_block_builder = TestBlockBuilder::eth(); - - let canonical_head_number = 9; - let blocks: Vec<_> = - test_block_builder.get_executed_blocks(0..canonical_head_number + 1).collect(); - test_harness = test_harness.with_blocks(blocks.clone()); - - let last_persisted_block_number = 3; - test_harness.tree.persistence_state.last_persisted_block = - blocks[last_persisted_block_number as usize].recovered_block.num_hash(); - - let persistence_threshold = 4; - let memory_block_buffer_target = 3; - test_harness.tree.config = TreeConfig::default() - .with_persistence_threshold(persistence_threshold) - .with_memory_block_buffer_target(memory_block_buffer_target); - - let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist(); - - let expected_blocks_to_persist_length: usize = - (canonical_head_number - memory_block_buffer_target - last_persisted_block_number) - .try_into() - .unwrap(); - - assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); - for (i, item) in - blocks_to_persist.iter().enumerate().take(expected_blocks_to_persist_length) - { - assert_eq!(item.recovered_block().number, last_persisted_block_number + i as u64 + 1); - } - - // make sure only canonical blocks are included - let fork_block = test_block_builder.get_executed_block_with_number(4, B256::random()); - let fork_block_hash = fork_block.recovered_block().hash(); - test_harness.tree.state.tree_state.insert_executed(fork_block); - - assert!(test_harness.tree.state.tree_state.block_by_hash(fork_block_hash).is_some()); - - let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist(); - assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); - - // check that the fork block is not included in the blocks to persist - assert!(!blocks_to_persist.iter().any(|b| b.recovered_block().hash() == fork_block_hash)); - - // check that the original block 4 is still included - assert!(blocks_to_persist.iter().any(|b| b.recovered_block().number == 4 && - b.recovered_block().hash() == blocks[4].recovered_block().hash())); - - // check that if we advance persistence, the persistence action is the correct value - test_harness.tree.advance_persistence().expect("advancing persistence should succeed"); - assert_eq!( - test_harness.tree.persistence_state.current_action().cloned(), - Some(CurrentPersistenceAction::SavingBlocks { - highest: blocks_to_persist.last().unwrap().recovered_block().num_hash() - }) - ); - } - - #[tokio::test] - async fn test_engine_tree_fcu_missing_head() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); - - let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect(); - 
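(Editor's aside, not part of the diff: a worked instance of the persist-window arithmetic exercised by `test_get_canonical_blocks_to_persist` above; the variable names mirror the test's locals and the numbers are the test's own.)

    // Canonical head 9, last persisted block 3, in-memory buffer target 3:
    // the persistable window is (last_persisted + 1)..=(head - buffer_target) == 4..=6.
    let canonical_head_number: u64 = 9;
    let last_persisted_block_number: u64 = 3;
    let memory_block_buffer_target: u64 = 3;
    let expected_len =
        canonical_head_number - memory_block_buffer_target - last_persisted_block_number;
    assert_eq!(expected_len, 3); // blocks 4, 5 and 6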
test_harness = test_harness.with_blocks(blocks); - - let missing_block = test_block_builder - .generate_random_block(6, test_harness.blocks.last().unwrap().recovered_block().hash()); - - test_harness.fcu_to(missing_block.hash(), PayloadStatusEnum::Syncing).await; - - // after FCU we receive an EngineApiEvent::Download event to get the missing block. - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(actual_block_set)) => { - let expected_block_set = HashSet::from_iter([missing_block.hash()]); - assert_eq!(actual_block_set, expected_block_set); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - #[tokio::test] - async fn test_engine_tree_fcu_canon_chain_insertion() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - test_harness - .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // extend main chain - let main_chain = test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 3); - - test_harness.insert_chain(main_chain).await; - } - - #[tokio::test] - async fn test_engine_tree_fcu_reorg_with_all_blocks() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let main_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect(); - test_harness = test_harness.with_blocks(main_chain.clone()); - - let fork_chain = test_harness.block_builder.create_fork(main_chain[2].recovered_block(), 3); - let fork_chain_last_hash = fork_chain.last().unwrap().hash(); - - // add fork blocks to the tree - for block in &fork_chain { - test_harness.insert_block(block.clone()).unwrap(); - } - - test_harness.send_fcu(fork_chain_last_hash, ForkchoiceStatus::Valid).await; - - // check for ForkBlockAdded events, we expect fork_chain.len() blocks added - test_harness.check_fork_chain_insertion(fork_chain.clone()).await; - - // check for CanonicalChainCommitted event - test_harness.check_canon_commit(fork_chain_last_hash).await; - - test_harness.check_fcu(fork_chain_last_hash, ForkchoiceStatus::Valid).await; - - // new head is the tip of the fork chain - test_harness.check_canon_head(fork_chain_last_hash); - } - - #[tokio::test] - async fn test_engine_tree_live_sync_transition_required_blocks_requested() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - test_harness - .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // extend main chain with enough blocks to trigger pipeline run but don't insert them - let main_chain = test_harness - .block_builder - .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); - - let main_chain_last_hash = main_chain.last().unwrap().hash(); - test_harness.send_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await; - - test_harness.check_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await; - - // create event for backfill finished - let backfill_finished_block_number = MIN_BLOCKS_FOR_PIPELINE_RUN + 1; - let backfill_finished = 
FromOrchestrator::BackfillSyncFinished(ControlFlow::Continue { - block_number: backfill_finished_block_number, - }); - - let backfill_tip_block = main_chain[(backfill_finished_block_number - 1) as usize].clone(); - // add block to mock provider to enable persistence clean up. - test_harness.provider.add_block(backfill_tip_block.hash(), backfill_tip_block.into_block()); - test_harness.tree.on_engine_message(FromEngine::Event(backfill_finished)).unwrap(); - - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { - assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash])); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain - .last() - .unwrap() - .clone()])) - .unwrap(); - - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockRange(initial_hash, total_blocks)) => { - assert_eq!( - total_blocks, - (main_chain.len() - backfill_finished_block_number as usize - 1) as u64 - ); - assert_eq!(initial_hash, main_chain.last().unwrap().parent_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - #[tokio::test] - async fn test_engine_tree_live_sync_transition_eventually_canonical() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - test_harness.tree.config = test_harness.tree.config.with_max_execute_block_batch_size(100); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // fcu to the tip of base chain - test_harness - .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // create main chain, extension of base chain, with enough blocks to - // trigger backfill sync - let main_chain = test_harness - .block_builder - .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); - - let main_chain_last = main_chain.last().unwrap(); - let main_chain_last_hash = main_chain_last.hash(); - let main_chain_backfill_target = - main_chain.get(MIN_BLOCKS_FOR_PIPELINE_RUN as usize).unwrap(); - let main_chain_backfill_target_hash = main_chain_backfill_target.hash(); - - // fcu to the element of main chain that should trigger backfill sync - test_harness.send_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; - test_harness.check_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; - - // check download request for target - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { - assert_eq!(hash_set, HashSet::from_iter([main_chain_backfill_target_hash])); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // send message to tell the engine the requested block was downloaded - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![ - main_chain_backfill_target.clone() - ])) - .unwrap(); - - // check that backfill is triggered - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BackfillAction(BackfillAction::Start( - reth_stages::PipelineTarget::Sync(target_hash), - )) => { - assert_eq!(target_hash, 
main_chain_backfill_target_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // persist blocks of main chain, same as the backfill operation would do - let backfilled_chain: Vec<_> = - main_chain.clone().drain(0..(MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize).collect(); - test_harness.persist_blocks(backfilled_chain.clone()); - - test_harness.setup_range_insertion_for_valid_chain(backfilled_chain); - - // send message to mark backfill finished - test_harness - .tree - .on_engine_message(FromEngine::Event(FromOrchestrator::BackfillSyncFinished( - ControlFlow::Continue { block_number: main_chain_backfill_target.number }, - ))) - .unwrap(); - - // send fcu to the tip of main - test_harness.fcu_to(main_chain_last_hash, ForkchoiceStatus::Syncing).await; - - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(target_hash)) => { - assert_eq!(target_hash, HashSet::from_iter([main_chain_last_hash])); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // tell engine main chain tip downloaded - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain_last.clone()])) - .unwrap(); - - // check download range request - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockRange(initial_hash, total_blocks)) => { - assert_eq!( - total_blocks, - (main_chain.len() - MIN_BLOCKS_FOR_PIPELINE_RUN as usize - 2) as u64 - ); - assert_eq!(initial_hash, main_chain_last.parent_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - let remaining: Vec<_> = main_chain - .clone() - .drain((MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize..main_chain.len()) - .collect(); - - test_harness.setup_range_insertion_for_valid_chain(remaining.clone()); - - // tell engine block range downloaded - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(remaining.clone())) - .unwrap(); - - test_harness.check_canon_chain_insertion(remaining).await; - - // check canonical chain committed event with the hash of the latest block - test_harness.check_canon_commit(main_chain_last_hash).await; - - // new head is the tip of the main chain - test_harness.check_canon_head(main_chain_last_hash); - } - - #[tokio::test] - async fn test_engine_tree_live_sync_fcu_extends_canon_chain() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // fcu to the tip of base chain - test_harness - .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // create main chain, extension of base chain - let main_chain = - test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 10); - // determine target in the middle of main hain - let target = main_chain.get(5).unwrap(); - let target_hash = target.hash(); - let main_last = main_chain.last().unwrap(); - let main_last_hash = main_last.hash(); - - // insert main chain - test_harness.insert_chain(main_chain).await; - - // send fcu to target - test_harness.send_fcu(target_hash, ForkchoiceStatus::Valid).await; - - test_harness.check_canon_commit(target_hash).await; - test_harness.check_fcu(target_hash, ForkchoiceStatus::Valid).await; - - // send 
fcu to main tip - test_harness.send_fcu(main_last_hash, ForkchoiceStatus::Valid).await; - - test_harness.check_canon_commit(main_last_hash).await; - test_harness.check_fcu(main_last_hash, ForkchoiceStatus::Valid).await; - test_harness.check_canon_head(main_last_hash); - } - - #[tokio::test] - async fn test_engine_tree_valid_forks_with_older_canonical_head() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - let old_head = base_chain.first().unwrap().recovered_block(); - - // extend base chain - let extension_chain = test_harness.block_builder.create_fork(old_head, 5); - let fork_block = extension_chain.last().unwrap().clone_sealed_block(); - - test_harness.setup_range_insertion_for_valid_chain(extension_chain.clone()); - test_harness.insert_chain(extension_chain).await; - - // fcu to old_head - test_harness.fcu_to(old_head.hash(), ForkchoiceStatus::Valid).await; - - // create two competing chains starting from fork_block - let chain_a = test_harness.block_builder.create_fork(&fork_block, 10); - let chain_b = test_harness.block_builder.create_fork(&fork_block, 10); - - // insert chain A blocks using newPayload - test_harness.setup_range_insertion_for_valid_chain(chain_a.clone()); - for block in &chain_a { - test_harness.send_new_payload(block.clone()).await; - } - - test_harness.check_canon_chain_insertion(chain_a.clone()).await; - - // insert chain B blocks using newPayload - test_harness.setup_range_insertion_for_valid_chain(chain_b.clone()); - for block in &chain_b { - test_harness.send_new_payload(block.clone()).await; - } - - test_harness.check_canon_chain_insertion(chain_b.clone()).await; - - // send FCU to make the tip of chain B the new head - let chain_b_tip_hash = chain_b.last().unwrap().hash(); - test_harness.send_fcu(chain_b_tip_hash, ForkchoiceStatus::Valid).await; - - // check for CanonicalChainCommitted event - test_harness.check_canon_commit(chain_b_tip_hash).await; - - // verify FCU was processed - test_harness.check_fcu(chain_b_tip_hash, ForkchoiceStatus::Valid).await; - - // verify the new canonical head - test_harness.check_canon_head(chain_b_tip_hash); - - // verify that chain A is now considered a fork - assert!(test_harness.tree.is_fork(chain_a.last().unwrap().hash()).unwrap()); - } - - #[tokio::test] - async fn test_engine_tree_buffered_blocks_are_eventually_connected() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // side chain consisting of two blocks, the last will be inserted first - // so that we force it to be buffered - let side_chain = - test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 2); - - // buffer last block of side chain - let buffered_block = side_chain.last().unwrap(); - let buffered_block_hash = buffered_block.hash(); - - test_harness.setup_range_insertion_for_valid_chain(vec![buffered_block.clone()]); - test_harness.send_new_payload(buffered_block.clone()).await; - - assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_some()); - - let non_buffered_block = side_chain.first().unwrap(); - 
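(Editor's aside, not part of the diff: the invalid-fork tests below drive `setup_range_insertion_for_invalid_chain`, whose `invalid_index` counts from the tip because execution outcomes are registered on the reversed chain; a minimal sketch of the index mapping, using the same illustrative numbers as the test below.)

    // For a fork of length 10 with invalid_index = 3, the corrupted state root is
    // registered for the block at forward position 10 - 3 - 1 = 6, so blocks 0..=5
    // are inserted as valid fork blocks and blocks 6..=9 are reported invalid.
    let chain_len = 10usize;
    let invalid_index = 3usize;
    let first_invalid_forward_pos = chain_len - invalid_index - 1;
    assert_eq!(first_invalid_forward_pos, 6);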
let non_buffered_block_hash = non_buffered_block.hash(); - - // insert block that continues the canon chain, should not be buffered - test_harness.setup_range_insertion_for_valid_chain(vec![non_buffered_block.clone()]); - test_harness.send_new_payload(non_buffered_block.clone()).await; - assert!(test_harness.tree.state.buffer.block(&non_buffered_block_hash).is_none()); - - // the previously buffered block should be connected now - assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_none()); - - // both blocks are added to the canon chain in order - test_harness.check_canon_block_added(non_buffered_block_hash).await; - test_harness.check_canon_block_added(buffered_block_hash).await; - } - - #[tokio::test] - async fn test_engine_tree_valid_and_invalid_forks_with_older_canonical_head() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - let old_head = base_chain.first().unwrap().recovered_block(); - - // extend base chain - let extension_chain = test_harness.block_builder.create_fork(old_head, 5); - let fork_block = extension_chain.last().unwrap().clone_sealed_block(); - test_harness.insert_chain(extension_chain).await; - - // fcu to old_head - test_harness.fcu_to(old_head.hash(), ForkchoiceStatus::Valid).await; - - // create two competing chains starting from fork_block, one of them invalid - let total_fork_elements = 10; - let chain_a = test_harness.block_builder.create_fork(&fork_block, total_fork_elements); - let chain_b = test_harness.block_builder.create_fork(&fork_block, total_fork_elements); - - // insert chain B blocks using newPayload - test_harness.setup_range_insertion_for_valid_chain(chain_b.clone()); - for block in &chain_b { - test_harness.send_new_payload(block.clone()).await; - test_harness.send_fcu(block.hash(), ForkchoiceStatus::Valid).await; - test_harness.check_canon_block_added(block.hash()).await; - test_harness.check_canon_commit(block.hash()).await; - test_harness.check_fcu(block.hash(), ForkchoiceStatus::Valid).await; - } - - // insert chain A blocks using newPayload, one of the blocks will be invalid - let invalid_index = 3; - test_harness.setup_range_insertion_for_invalid_chain(chain_a.clone(), invalid_index); - for block in &chain_a { - test_harness.send_new_payload(block.clone()).await; - } - - // check canon chain insertion up to the invalid index and taking into - // account reversed ordering - test_harness - .check_fork_chain_insertion( - chain_a[..chain_a.len() - invalid_index - 1].iter().cloned(), - ) - .await; - for block in &chain_a[chain_a.len() - invalid_index - 1..] 
{ - test_harness.check_invalid_block(block.hash()).await; - } - - // send FCU to make the tip of chain A, expect invalid - let chain_a_tip_hash = chain_a.last().unwrap().hash(); - test_harness.fcu_to(chain_a_tip_hash, ForkchoiceStatus::Invalid).await; - - // send FCU to make the tip of chain B the new head - let chain_b_tip_hash = chain_b.last().unwrap().hash(); - - // verify the new canonical head - test_harness.check_canon_head(chain_b_tip_hash); - - // verify the canonical head didn't change - test_harness.check_canon_head(chain_b_tip_hash); - } - - #[tokio::test] - async fn test_engine_tree_reorg_with_missing_ancestor_expecting_valid() { - reth_tracing::init_test_tracing(); - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..6).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // create a side chain with an invalid block - let side_chain = test_harness - .block_builder - .create_fork(base_chain.last().unwrap().recovered_block(), 15); - let invalid_index = 9; - - test_harness.setup_range_insertion_for_invalid_chain(side_chain.clone(), invalid_index); - - for (index, block) in side_chain.iter().enumerate() { - test_harness.send_new_payload(block.clone()).await; - - if index < side_chain.len() - invalid_index - 1 { - test_harness.send_fcu(block.hash(), ForkchoiceStatus::Valid).await; - } - } - - // Try to do a forkchoice update to a block after the invalid one - let fork_tip_hash = side_chain.last().unwrap().hash(); - test_harness.send_fcu(fork_tip_hash, ForkchoiceStatus::Invalid).await; - } -} diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index ef702bf74dd..5c782fbd4bb 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -63,8 +63,8 @@ where disable_transaction_prewarming: bool, /// Determines how to configure the evm for execution. evm_config: Evm, - /// whether precompile cache should be enabled. - precompile_cache_enabled: bool, + /// Whether precompile cache should be disabled. + precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, _marker: std::marker::PhantomData, @@ -89,7 +89,7 @@ where cross_block_cache_size: config.cross_block_cache_size(), disable_transaction_prewarming: config.disable_caching_and_prewarming(), evm_config, - precompile_cache_enabled: config.precompile_cache_enabled(), + precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, _marker: Default::default(), } @@ -273,7 +273,7 @@ where provider: provider_builder, metrics: PrewarmMetrics::default(), terminate_execution: Arc::new(AtomicBool::new(false)), - precompile_cache_enabled: self.precompile_cache_enabled, + precompile_cache_disabled: self.precompile_cache_disabled, precompile_cache_map: self.precompile_cache_map.clone(), }; diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 4e4625d1fff..2153f6ee753 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -214,7 +214,7 @@ where pub(super) metrics: PrewarmMetrics, /// An atomic bool that tells prewarm tasks to not start any more execution. 
     pub(super) terminate_execution: Arc<AtomicBool>,
-    pub(super) precompile_cache_enabled: bool,
+    pub(super) precompile_cache_disabled: bool,
     pub(super) precompile_cache_map: PrecompileCacheMap<SpecFor<Evm>>,
 }
@@ -237,7 +237,7 @@ where
             provider,
             metrics,
             terminate_execution,
-            precompile_cache_enabled,
+            precompile_cache_disabled,
             mut precompile_cache_map,
         } = self;
@@ -269,7 +269,7 @@ where
         let spec_id = *evm_env.spec_id();
         let mut evm = evm_config.evm_with_env(state_provider, evm_env);

-        if precompile_cache_enabled {
+        if !precompile_cache_disabled {
             evm.precompiles_mut().map_precompiles(|address, precompile| {
                 CachedPrecompile::wrap(
                     precompile,
diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs
index 0c74f2a4995..47d985a9296 100644
--- a/crates/engine/tree/src/tree/precompile_cache.rs
+++ b/crates/engine/tree/src/tree/precompile_cache.rs
@@ -1,10 +1,19 @@
 //! Contains a precompile cache that is backed by a moka cache.

 use alloy_primitives::Bytes;
+use parking_lot::Mutex;
 use reth_evm::precompiles::{DynPrecompile, Precompile};
 use revm::precompile::{PrecompileOutput, PrecompileResult};
-use revm_primitives::{Address, HashMap};
-use std::{hash::Hash, sync::Arc};
+use revm_primitives::Address;
+use schnellru::LruMap;
+use std::{
+    collections::HashMap,
+    hash::{Hash, Hasher},
+    sync::Arc,
+};
+
+/// Default max cache size for [`PrecompileCache`]
+const MAX_CACHE_SIZE: u32 = 10_000;

 /// Stores caches for each precompile.
 #[derive(Debug, Clone, Default)]
@@ -22,10 +31,11 @@ where
 }

 /// Cache for precompiles, for each input stores the result.
+///
+/// [`LruMap`] requires a mutable reference on `get` since it updates the LRU order,
+/// so we use a [`Mutex`] instead of an `RwLock`.
 #[derive(Debug, Clone)]
-pub struct PrecompileCache<S>(
-    Arc<mini_moka::sync::Cache<CacheKey<S>, CacheEntry, alloy_primitives::map::DefaultHashBuilder>>,
-)
+pub struct PrecompileCache<S>(Arc<Mutex<LruMap<CacheKey<S>, CacheEntry>>>)
 where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone;

@@ -34,10 +44,7 @@ where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
 {
     fn default() -> Self {
-        Self(Arc::new(
-            mini_moka::sync::CacheBuilder::new(100_000)
-                .build_with_hasher(alloy_primitives::map::DefaultHashBuilder::default()),
-        ))
+        Self(Arc::new(Mutex::new(LruMap::new(schnellru::ByLength::new(MAX_CACHE_SIZE)))))
     }
 }

@@ -45,16 +52,15 @@ impl<S> PrecompileCache<S>
 where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
 {
-    fn get(&self, key: &CacheKey<S>) -> Option<CacheEntry> {
-        self.0.get(key)
-    }
-
-    fn insert(&self, key: CacheKey<S>, value: CacheEntry) {
-        self.0.insert(key, value);
+    fn get(&self, key: &CacheKeyRef<'_, S>) -> Option<CacheEntry> {
+        self.0.lock().get(key).cloned()
     }

-    fn weighted_size(&self) -> u64 {
-        self.0.weighted_size()
+    /// Inserts the given key and value into the cache, returning the new cache size.
+    fn insert(&self, key: CacheKey<S>, value: CacheEntry) -> usize {
+        let mut cache = self.0.lock();
+        cache.insert(key, value);
+        cache.len()
     }
 }

@@ -69,6 +75,29 @@ impl<S> CacheKey<S> {
     }
 }

+/// Cache key reference, used to avoid cloning the input bytes when looking up using a [`CacheKey`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct CacheKeyRef<'a, S>((S, &'a [u8]));
+
+impl<'a, S> CacheKeyRef<'a, S> {
+    const fn new(spec_id: S, input: &'a [u8]) -> Self {
+        Self((spec_id, input))
+    }
+}
+
+impl<S: PartialEq> PartialEq<CacheKey<S>> for CacheKeyRef<'_, S> {
+    fn eq(&self, other: &CacheKey<S>) -> bool {
+        self.0 .0 == other.0 .0 && self.0 .1 == other.0 .1.as_ref()
+    }
+}
+
+impl<'a, S: Hash> Hash for CacheKeyRef<'a, S> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.0 .0.hash(state);
+        self.0 .1.hash(state);
+    }
+}
+
 /// Cache entry, precompile successful output.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct CacheEntry(PrecompileOutput);
@@ -129,11 +158,6 @@ where
     fn increment_by_one_precompile_errors(&self) {
         self.metrics.precompile_errors.increment(1);
     }
-
-    fn update_precompile_cache_size(&self) {
-        let new_size = self.cache.weighted_size();
-        self.metrics.precompile_cache_size.set(new_size as f64);
-    }
 }

 impl<S> Precompile for CachedPrecompile<S>
 where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
 {
     fn call(&self, data: &[u8], gas_limit: u64) -> PrecompileResult {
-        let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(data));
+        let key = CacheKeyRef::new(self.spec_id.clone(), data);

         if let Some(entry) = &self.cache.get(&key) {
             self.increment_by_one_precompile_cache_hits();
@@ -154,15 +178,15 @@ where

         match &result {
             Ok(output) => {
+                let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(data));
+                let size = self.cache.insert(key, CacheEntry(output.clone()));
+                self.metrics.precompile_cache_size.set(size as f64);
                 self.increment_by_one_precompile_cache_misses();
-                self.cache.insert(key, CacheEntry(output.clone()));
             }
             _ => {
                 self.increment_by_one_precompile_errors();
             }
         }
-
-        self.update_precompile_cache_size();
         result
     }
 }
@@ -177,9 +201,7 @@ pub(crate) struct CachedPrecompileMetrics {
     /// Precompile cache misses
     precompile_cache_misses: metrics::Counter,

-    /// Precompile cache size
-    ///
-    /// NOTE: this uses the moka caches`weighted_size` method to calculate size.
+    /// Precompile cache size. Uses the LRU cache length as the size metric.
     precompile_cache_size: metrics::Gauge,

     /// Precompile execution errors.
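(Editor's aside, not part of the diff: a minimal, self-contained sketch of why the rewrite above guards `schnellru::LruMap` with a `parking_lot::Mutex` rather than an `RwLock`: even a cache hit reorders the LRU list, so `get` takes `&mut self`. The capacity, key/value types and function name here are illustrative only.)

    use parking_lot::Mutex;
    use schnellru::{ByLength, LruMap};
    use std::sync::Arc;

    fn lru_get_mutates() {
        // Capacity of two entries, shared the same way PrecompileCache shares its map.
        let cache: Arc<Mutex<LruMap<u64, &'static str>>> =
            Arc::new(Mutex::new(LruMap::new(ByLength::new(2))));
        cache.lock().insert(1, "a");
        cache.lock().insert(2, "b");
        // A hit on key 1 mutates the map: the entry becomes most-recently-used...
        assert!(cache.lock().get(&1).is_some());
        // ...so the next insert evicts key 2, now the least-recently-used entry.
        cache.lock().insert(3, "c");
        assert!(cache.lock().get(&2).is_none());
        assert!(cache.lock().get(&1).is_some());
    }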
@@ -188,10 +210,29 @@ pub(crate) struct CachedPrecompileMetrics {

 #[cfg(test)]
 mod tests {
+    use std::hash::DefaultHasher;
+
     use super::*;
     use revm::precompile::PrecompileOutput;
     use revm_primitives::hardfork::SpecId;

+    #[test]
+    fn test_cache_key_ref_hash() {
+        let key1 = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
+        let key2 = CacheKeyRef::new(SpecId::PRAGUE, b"test_input");
+        assert!(PartialEq::eq(&key2, &key1));
+
+        let mut hasher = DefaultHasher::new();
+        key1.hash(&mut hasher);
+        let hash1 = hasher.finish();
+
+        let mut hasher = DefaultHasher::new();
+        key2.hash(&mut hasher);
+        let hash2 = hasher.finish();
+
+        assert_eq!(hash1, hash2);
+    }
+
     #[test]
     fn test_precompile_cache_basic() {
         let dyn_precompile: DynPrecompile = |_input: &[u8], _gas: u64| -> PrecompileResult {
@@ -202,16 +243,16 @@ mod tests {

         let cache =
             CachedPrecompile::new(dyn_precompile, PrecompileCache::default(), SpecId::PRAGUE);

-        let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
-
         let output = PrecompileOutput {
             gas_used: 50,
             bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"),
         };
+        let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
         let expected = CacheEntry(output);
-        cache.cache.insert(key.clone(), expected.clone());
+        cache.cache.insert(key, expected.clone());

+        let key = CacheKeyRef::new(SpecId::PRAGUE, b"test_input");
         let actual = cache.cache.get(&key).unwrap();

         assert_eq!(actual, expected);
diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs
index e88986bf746..7bc443db935 100644
--- a/crates/engine/tree/src/tree/state.rs
+++ b/crates/engine/tree/src/tree/state.rs
@@ -210,13 +210,20 @@ impl<N: NodePrimitives> TreeState<N> {
         while let Some(executed) = self.blocks_by_hash.get(&current_block) {
             current_block = executed.recovered_block().parent_hash();
             if executed.recovered_block().number() <= upper_bound {
-                debug!(target: "engine::tree", num_hash=?executed.recovered_block().num_hash(), "Attempting to remove block walking back from the head");
-                if let Some((removed, _)) = self.remove_by_hash(executed.recovered_block().hash()) {
-                    debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed block walking back from the head");
+                let num_hash = executed.recovered_block().num_hash();
+                debug!(target: "engine::tree", ?num_hash, "Attempting to remove block walking back from the head");
+                if let Some((mut removed, _)) =
+                    self.remove_by_hash(executed.recovered_block().hash())
+                {
+                    debug!(target: "engine::tree", ?num_hash, "Removed block walking back from the head");
                     // finally, move the trie updates
+                    let Some(trie_updates) = removed.trie.take_present() else {
+                        debug!(target: "engine::tree", ?num_hash, "No trie updates found for persisted block");
+                        continue;
+                    };
                     self.persisted_trie_updates.insert(
                         removed.recovered_block().hash(),
-                        (removed.recovered_block().number(), removed.trie),
+                        (removed.recovered_block().number(), trie_updates),
                     );
                 }
             }
diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs
new file mode 100644
index 00000000000..9fa1d960486
--- /dev/null
+++ b/crates/engine/tree/src/tree/tests.rs
@@ -0,0 +1,1186 @@
+use super::*;
+use crate::persistence::PersistenceAction;
+use alloy_consensus::Header;
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    Bytes, B256,
+};
+use alloy_rlp::Decodable;
+use alloy_rpc_types_engine::{
+    CancunPayloadFields, ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1,
+    ExecutionPayloadV3,
+};
+use assert_matches::assert_matches;
+use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
+use reth_chainspec::{ChainSpec, HOLESKY, MAINNET};
+use reth_engine_primitives::ForkchoiceStatus;
+use reth_ethereum_consensus::EthBeaconConsensus;
+use reth_ethereum_engine_primitives::EthEngineTypes;
+use reth_ethereum_primitives::{Block, EthPrimitives};
+use reth_evm_ethereum::MockEvmConfig;
+use reth_node_ethereum::EthereumEngineValidator;
+use reth_primitives_traits::Block as _;
+use reth_provider::test_utils::MockEthProvider;
+use reth_trie::HashedPostState;
+use std::{
+    collections::BTreeMap,
+    str::FromStr,
+    sync::mpsc::{channel, Sender},
+};
+
+/// This is a test channel that allows you to `release` any value that is in the channel.
+///
+/// If nothing has been sent, then the next value will be immediately sent.
+struct TestChannel<T> {
+    /// If an item is sent to this channel, an item will be released in the wrapped channel
+    release: Receiver<()>,
+    /// The sender channel
+    tx: Sender<T>,
+    /// The receiver channel
+    rx: Receiver<T>,
+}
+
+impl<T: Send + 'static> TestChannel<T> {
+    /// Creates a new test channel
+    fn spawn_channel() -> (Sender<T>, Receiver<T>, TestChannelHandle) {
+        let (original_tx, original_rx) = channel();
+        let (wrapped_tx, wrapped_rx) = channel();
+        let (release_tx, release_rx) = channel();
+        let handle = TestChannelHandle::new(release_tx);
+        let test_channel = Self { release: release_rx, tx: wrapped_tx, rx: original_rx };
+        // spawn the task that listens and releases stuff
+        std::thread::spawn(move || test_channel.intercept_loop());
+        (original_tx, wrapped_rx, handle)
+    }
+
+    /// Runs the intercept loop, waiting for the handle to release a value
+    fn intercept_loop(&self) {
+        while self.release.recv() == Ok(()) {
+            let Ok(value) = self.rx.recv() else { return };
+
+            let _ = self.tx.send(value);
+        }
+    }
+}
+
+struct TestChannelHandle {
+    /// The sender to use for releasing values
+    release: Sender<()>,
+}
+
+impl TestChannelHandle {
+    /// Returns a [`TestChannelHandle`]
+    const fn new(release: Sender<()>) -> Self {
+        Self { release }
+    }
+
+    /// Signals to the channel task that a value should be released
+    #[expect(dead_code)]
+    fn release(&self) {
+        let _ = self.release.send(());
+    }
+}
+
+struct TestHarness {
+    tree: EngineApiTreeHandler<
+        EthPrimitives,
+        MockEthProvider,
+        EthEngineTypes,
+        EthereumEngineValidator,
+        MockEvmConfig,
+    >,
+    to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes, EthPrimitives>, Block>>,
+    from_tree_rx: UnboundedReceiver<EngineApiEvent>,
+    blocks: Vec<ExecutedBlockWithTrieUpdates>,
+    action_rx: Receiver<PersistenceAction>,
+    evm_config: MockEvmConfig,
+    block_builder: TestBlockBuilder,
+    provider: MockEthProvider,
+}
+
+impl TestHarness {
+    fn new(chain_spec: Arc<ChainSpec>) -> Self {
+        let (action_tx, action_rx) = channel();
+        Self::with_persistence_channel(chain_spec, action_tx, action_rx)
+    }
+
+    #[expect(dead_code)]
+    fn with_test_channel(chain_spec: Arc<ChainSpec>) -> (Self, TestChannelHandle) {
+        let (action_tx, action_rx, handle) = TestChannel::spawn_channel();
+        (Self::with_persistence_channel(chain_spec, action_tx, action_rx), handle)
+    }
+
+    fn with_persistence_channel(
+        chain_spec: Arc<ChainSpec>,
+        action_tx: Sender<PersistenceAction>,
+        action_rx: Receiver<PersistenceAction>,
+    ) -> Self {
+        let persistence_handle = PersistenceHandle::new(action_tx);
+
+        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
+
+        let provider = MockEthProvider::default();
+
+        let payload_validator = EthereumEngineValidator::new(chain_spec.clone());
+
+        let (from_tree_tx, from_tree_rx) = unbounded_channel();
+
+        let header = chain_spec.genesis_header().clone();
+        let header = SealedHeader::seal_slow(header);
+        let engine_api_tree_state =
+            EngineApiTreeState::new(10,
10, header.num_hash(), EngineApiKind::Ethereum); + let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); + + let (to_payload_service, _payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + + let evm_config = MockEvmConfig::default(); + + let tree = EngineApiTreeHandler::new( + provider.clone(), + consensus, + payload_validator, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + PersistenceState::default(), + payload_builder, + // TODO: fix tests for state root task https://github.com/paradigmxyz/reth/issues/14376 + // always assume enough parallelism for tests + TreeConfig::default().with_legacy_state_root(true).with_has_enough_parallelism(true), + EngineApiKind::Ethereum, + evm_config.clone(), + ); + + let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone()); + Self { + to_tree_tx: tree.incoming_tx.clone(), + tree, + from_tree_rx, + blocks: vec![], + action_rx, + evm_config, + block_builder, + provider, + } + } + + fn with_blocks(mut self, blocks: Vec) -> Self { + let mut blocks_by_hash = HashMap::default(); + let mut blocks_by_number = BTreeMap::new(); + let mut state_by_hash = HashMap::default(); + let mut hash_by_number = BTreeMap::new(); + let mut parent_to_child: HashMap> = HashMap::default(); + let mut parent_hash = B256::ZERO; + + for block in &blocks { + let sealed_block = block.recovered_block(); + let hash = sealed_block.hash(); + let number = sealed_block.number; + blocks_by_hash.insert(hash, block.clone()); + blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone()); + state_by_hash.insert(hash, Arc::new(BlockState::new(block.clone()))); + hash_by_number.insert(number, hash); + parent_to_child.entry(parent_hash).or_default().insert(hash); + parent_hash = hash; + } + + self.tree.state.tree_state = TreeState { + blocks_by_hash, + blocks_by_number, + current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), + parent_to_child, + persisted_trie_updates: HashMap::default(), + engine_kind: EngineApiKind::Ethereum, + }; + + let last_executed_block = blocks.last().unwrap().clone(); + let pending = Some(BlockState::new(last_executed_block)); + self.tree.canonical_in_memory_state = + CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending, None, None); + + self.blocks = blocks.clone(); + + let recovered_blocks = + blocks.iter().map(|b| b.recovered_block().clone()).collect::>(); + + self.persist_blocks(recovered_blocks); + + self + } + + const fn with_backfill_state(mut self, state: BackfillSyncState) -> Self { + self.tree.backfill_sync_state = state; + self + } + + fn extend_execution_outcome( + &self, + execution_outcomes: impl IntoIterator>, + ) { + self.evm_config.extend(execution_outcomes); + } + + fn insert_block( + &mut self, + block: RecoveredBlock, + ) -> Result> { + let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); + self.extend_execution_outcome([execution_outcome]); + self.tree.provider.add_state_root(block.state_root); + self.tree.insert_block(block) + } + + async fn fcu_to(&mut self, block_hash: B256, fcu_status: impl Into) { + let fcu_status = fcu_status.into(); + + self.send_fcu(block_hash, fcu_status).await; + + self.check_fcu(block_hash, fcu_status).await; + } + + async fn send_fcu(&mut self, block_hash: B256, fcu_status: impl Into) { + let fcu_state = self.fcu_state(block_hash); + + let (tx, rx) = oneshot::channel(); + 
self.tree + .on_engine_message(FromEngine::Request( + BeaconEngineMessage::ForkchoiceUpdated { + state: fcu_state, + payload_attrs: None, + tx, + version: EngineApiMessageVersion::default(), + } + .into(), + )) + .unwrap(); + + let response = rx.await.unwrap().unwrap().await.unwrap(); + match fcu_status.into() { + ForkchoiceStatus::Valid => assert!(response.payload_status.is_valid()), + ForkchoiceStatus::Syncing => assert!(response.payload_status.is_syncing()), + ForkchoiceStatus::Invalid => assert!(response.payload_status.is_invalid()), + } + } + + async fn check_fcu(&mut self, block_hash: B256, fcu_status: impl Into) { + let fcu_state = self.fcu_state(block_hash); + + // check for ForkchoiceUpdated event + let event = self.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::ForkchoiceUpdated( + state, + status, + )) => { + assert_eq!(state, fcu_state); + assert_eq!(status, fcu_status.into()); + } + _ => panic!("Unexpected event: {event:#?}"), + } + } + + const fn fcu_state(&self, block_hash: B256) -> ForkchoiceState { + ForkchoiceState { + head_block_hash: block_hash, + safe_block_hash: block_hash, + finalized_block_hash: block_hash, + } + } + + async fn send_new_payload(&mut self, block: RecoveredBlock) { + let payload = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &block.clone_sealed_block().into_block(), + ); + self.tree + .on_new_payload(ExecutionData { + payload: payload.into(), + sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: vec![], + }), + }) + .unwrap(); + } + + async fn insert_chain( + &mut self, + chain: impl IntoIterator> + Clone, + ) { + for block in chain.clone() { + self.insert_block(block.clone()).unwrap(); + } + self.check_canon_chain_insertion(chain).await; + } + + async fn check_canon_commit(&mut self, hash: B256) { + let event = self.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::BeaconConsensus( + BeaconConsensusEngineEvent::CanonicalChainCommitted(header, _), + ) => { + assert_eq!(header.hash(), hash); + } + _ => panic!("Unexpected event: {event:#?}"), + } + } + + async fn check_canon_chain_insertion( + &mut self, + chain: impl IntoIterator> + Clone, + ) { + for block in chain.clone() { + self.check_canon_block_added(block.hash()).await; + } + } + + async fn check_canon_block_added(&mut self, expected_hash: B256) { + let event = self.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::CanonicalBlockAdded( + executed, + _, + )) => { + assert_eq!(executed.recovered_block.hash(), expected_hash); + } + _ => panic!("Unexpected event: {event:#?}"), + } + } + + async fn check_block_received(&mut self, hash: B256) { + let event = self.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::BlockReceived( + num_hash, + )) => { + assert_eq!(num_hash.hash, hash); + } + _ => panic!("Unexpected event: {event:#?}"), + } + } + + fn persist_blocks(&self, blocks: Vec>) { + let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); + let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); + + for block in &blocks { + block_data.push((block.hash(), block.clone_block())); + headers_data.push((block.hash(), block.header().clone())); + } + + self.provider.extend_blocks(block_data); + self.provider.extend_headers(headers_data); + } + + 
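(Editor's aside, not part of the diff: a hypothetical usage sketch for the `TestChannel` defined above; the `u32` payload is arbitrary. The handle holds values back until released, which lets a test freeze, e.g., persistence actions at a precise point.)

    let (tx, rx, handle) = TestChannel::<u32>::spawn_channel();
    tx.send(42).unwrap();
    // The intercept loop is still waiting on the handle, so nothing is forwarded yet.
    assert!(rx.try_recv().is_err());
    handle.release();
    // The buffered value is now passed through to the wrapped receiver.
    assert_eq!(rx.recv().unwrap(), 42);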
fn setup_range_insertion_for_valid_chain( + &mut self, + chain: Vec>, + ) { + self.setup_range_insertion_for_chain(chain, None) + } + + fn setup_range_insertion_for_invalid_chain( + &mut self, + chain: Vec>, + index: usize, + ) { + self.setup_range_insertion_for_chain(chain, Some(index)) + } + + fn setup_range_insertion_for_chain( + &mut self, + chain: Vec>, + invalid_index: Option, + ) { + // setting up execution outcomes for the chain, the blocks will be + // executed starting from the oldest, so we need to reverse. + let mut chain_rev = chain; + chain_rev.reverse(); + + let mut execution_outcomes = Vec::with_capacity(chain_rev.len()); + for (index, block) in chain_rev.iter().enumerate() { + let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); + let state_root = if invalid_index.is_some() && invalid_index.unwrap() == index { + B256::random() + } else { + block.state_root + }; + self.tree.provider.add_state_root(state_root); + execution_outcomes.push(execution_outcome); + } + self.extend_execution_outcome(execution_outcomes); + } + + fn check_canon_head(&self, head_hash: B256) { + assert_eq!(self.tree.state.tree_state.canonical_head().hash, head_hash); + } +} + +#[test] +fn test_tree_persist_block_batch() { + let tree_config = TreeConfig::default(); + let chain_spec = MAINNET.clone(); + let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); + + // we need more than tree_config.persistence_threshold() +1 blocks to + // trigger the persistence task. + let blocks: Vec<_> = test_block_builder + .get_executed_blocks(1..tree_config.persistence_threshold() + 2) + .collect(); + let mut test_harness = TestHarness::new(chain_spec).with_blocks(blocks); + + let mut blocks = vec![]; + for idx in 0..tree_config.max_execute_block_batch_size() * 2 { + blocks.push(test_block_builder.generate_random_block(idx as u64, B256::random())); + } + + test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(blocks)).unwrap(); + + // process the message + let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); + test_harness.tree.on_engine_message(msg).unwrap(); + + // we now should receive the other batch + let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); + match msg { + FromEngine::DownloadedBlocks(blocks) => { + assert_eq!(blocks.len(), tree_config.max_execute_block_batch_size()); + } + _ => panic!("unexpected message: {msg:#?}"), + } +} + +#[tokio::test] +async fn test_tree_persist_blocks() { + let tree_config = TreeConfig::default(); + let chain_spec = MAINNET.clone(); + let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); + + // we need more than tree_config.persistence_threshold() +1 blocks to + // trigger the persistence task. + let blocks: Vec<_> = test_block_builder + .get_executed_blocks(1..tree_config.persistence_threshold() + 2) + .collect(); + let test_harness = TestHarness::new(chain_spec).with_blocks(blocks.clone()); + std::thread::Builder::new() + .name("Tree Task".to_string()) + .spawn(|| test_harness.tree.run()) + .unwrap(); + + // send a message to the tree to enter the main loop. 
+ test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); + + let received_action = + test_harness.action_rx.recv().expect("Failed to receive save blocks action"); + if let PersistenceAction::SaveBlocks(saved_blocks, _) = received_action { + // only blocks.len() - tree_config.memory_block_buffer_target() will be + // persisted + let expected_persist_len = blocks.len() - tree_config.memory_block_buffer_target() as usize; + assert_eq!(saved_blocks.len(), expected_persist_len); + assert_eq!(saved_blocks, blocks[..expected_persist_len]); + } else { + panic!("unexpected action received {received_action:?}"); + } +} + +#[tokio::test] +async fn test_in_memory_state_trait_impl() { + let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(0..10).collect(); + let test_harness = TestHarness::new(MAINNET.clone()).with_blocks(blocks.clone()); + + for executed_block in blocks { + let sealed_block = executed_block.recovered_block(); + + let expected_state = BlockState::new(executed_block.clone()); + + let actual_state_by_hash = + test_harness.tree.canonical_in_memory_state.state_by_hash(sealed_block.hash()).unwrap(); + assert_eq!(expected_state, *actual_state_by_hash); + + let actual_state_by_number = test_harness + .tree + .canonical_in_memory_state + .state_by_number(sealed_block.number) + .unwrap(); + assert_eq!(expected_state, *actual_state_by_number); + } +} + +#[tokio::test] +async fn test_engine_request_during_backfill() { + let tree_config = TreeConfig::default(); + let blocks: Vec<_> = TestBlockBuilder::eth() + .get_executed_blocks(0..tree_config.persistence_threshold()) + .collect(); + let mut test_harness = TestHarness::new(MAINNET.clone()) + .with_blocks(blocks) + .with_backfill_state(BackfillSyncState::Active); + + let (tx, rx) = oneshot::channel(); + test_harness + .tree + .on_engine_message(FromEngine::Request( + BeaconEngineMessage::ForkchoiceUpdated { + state: ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::random(), + finalized_block_hash: B256::random(), + }, + payload_attrs: None, + tx, + version: EngineApiMessageVersion::default(), + } + .into(), + )) + .unwrap(); + + let resp = rx.await.unwrap().unwrap().await.unwrap(); + assert!(resp.payload_status.is_syncing()); +} + +#[test] +fn test_disconnected_payload() { + let s = include_str!("../../test-data/holesky/2.rlp"); + let data = Bytes::from_str(s).unwrap(); + let block = Block::decode(&mut data.as_ref()).unwrap(); + let sealed = block.seal_slow(); + let hash = sealed.hash(); + let payload = ExecutionPayloadV1::from_block_unchecked(hash, &sealed.clone().into_block()); + + let mut test_harness = TestHarness::new(HOLESKY.clone()); + + let outcome = test_harness + .tree + .on_new_payload(ExecutionData { + payload: payload.into(), + sidecar: ExecutionPayloadSidecar::none(), + }) + .unwrap(); + assert!(outcome.outcome.is_syncing()); + + // ensure block is buffered + let buffered = test_harness.tree.state.buffer.block(&hash).unwrap(); + assert_eq!(buffered.clone_sealed_block(), sealed); +} + +#[test] +fn test_disconnected_block() { + let s = include_str!("../../test-data/holesky/2.rlp"); + let data = Bytes::from_str(s).unwrap(); + let block = Block::decode(&mut data.as_ref()).unwrap(); + let sealed = block.seal_slow().try_recover().unwrap(); + + let mut test_harness = TestHarness::new(HOLESKY.clone()); + + let outcome = test_harness.tree.insert_block(sealed.clone()).unwrap(); + assert_eq!( + outcome, + InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head: 
test_harness.tree.state.tree_state.current_canonical_head, + missing_ancestor: sealed.parent_num_hash() + }) + ); +} + +#[tokio::test] +async fn test_holesky_payload() { + let s = include_str!("../../test-data/holesky/1.rlp"); + let data = Bytes::from_str(s).unwrap(); + let block: Block = Block::decode(&mut data.as_ref()).unwrap(); + let sealed = block.seal_slow(); + let payload = + ExecutionPayloadV1::from_block_unchecked(sealed.hash(), &sealed.clone().into_block()); + + let mut test_harness = + TestHarness::new(HOLESKY.clone()).with_backfill_state(BackfillSyncState::Active); + + let (tx, rx) = oneshot::channel(); + test_harness + .tree + .on_engine_message(FromEngine::Request( + BeaconEngineMessage::NewPayload { + payload: ExecutionData { + payload: payload.clone().into(), + sidecar: ExecutionPayloadSidecar::none(), + }, + tx, + } + .into(), + )) + .unwrap(); + + let resp = rx.await.unwrap().unwrap(); + assert!(resp.is_syncing()); +} + +#[tokio::test] +async fn test_tree_state_on_new_head_reorg() { + reth_tracing::init_test_tracing(); + let chain_spec = MAINNET.clone(); + + // Set persistence_threshold to 1 + let mut test_harness = TestHarness::new(chain_spec); + test_harness.tree.config = + test_harness.tree.config.with_persistence_threshold(1).with_memory_block_buffer_target(1); + let mut test_block_builder = TestBlockBuilder::eth(); + let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..6).collect(); + + for block in &blocks { + test_harness.tree.state.tree_state.insert_executed(block.clone()); + } + + // set block 3 as the current canonical head + test_harness.tree.state.tree_state.set_canonical_head(blocks[2].recovered_block().num_hash()); + + // create a fork from block 2 + let fork_block_3 = + test_block_builder.get_executed_block_with_number(3, blocks[1].recovered_block().hash()); + let fork_block_4 = + test_block_builder.get_executed_block_with_number(4, fork_block_3.recovered_block().hash()); + let fork_block_5 = + test_block_builder.get_executed_block_with_number(5, fork_block_4.recovered_block().hash()); + + test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone()); + test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone()); + test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone()); + + // normal (non-reorg) case + let result = test_harness.tree.on_new_head(blocks[4].recovered_block().hash()).unwrap(); + assert!(matches!(result, Some(NewCanonicalChain::Commit { .. 
}))); + if let Some(NewCanonicalChain::Commit { new }) = result { + assert_eq!(new.len(), 2); + assert_eq!(new[0].recovered_block().hash(), blocks[3].recovered_block().hash()); + assert_eq!(new[1].recovered_block().hash(), blocks[4].recovered_block().hash()); + } + + // should be a None persistence action before we advance persistence + let current_action = test_harness.tree.persistence_state.current_action(); + assert_eq!(current_action, None); + + // let's attempt to persist and check that it attempts to save blocks + // + // since in-memory block buffer target and persistence_threshold are both 1, this should + // save all but the current tip of the canonical chain (up to blocks[1]) + test_harness.tree.advance_persistence().unwrap(); + let current_action = test_harness.tree.persistence_state.current_action().cloned(); + assert_eq!( + current_action, + Some(CurrentPersistenceAction::SavingBlocks { + highest: blocks[1].recovered_block().num_hash() + }) + ); + + // get rid of the prev action + let received_action = test_harness.action_rx.recv().unwrap(); + let PersistenceAction::SaveBlocks(saved_blocks, sender) = received_action else { + panic!("received wrong action"); + }; + assert_eq!(saved_blocks, vec![blocks[0].clone(), blocks[1].clone()]); + + // send the response so we can advance again + sender.send(Some(blocks[1].recovered_block().num_hash())).unwrap(); + + // we should be persisting blocks[1] because we threw out the prev action + let current_action = test_harness.tree.persistence_state.current_action().cloned(); + assert_eq!( + current_action, + Some(CurrentPersistenceAction::SavingBlocks { + highest: blocks[1].recovered_block().num_hash() + }) + ); + + // after advancing persistence, we should be at `None` for the next action + test_harness.tree.advance_persistence().unwrap(); + let current_action = test_harness.tree.persistence_state.current_action().cloned(); + assert_eq!(current_action, None); + + // reorg case + let result = test_harness.tree.on_new_head(fork_block_5.recovered_block().hash()).unwrap(); + assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. 
}))); + + if let Some(NewCanonicalChain::Reorg { new, old }) = result { + assert_eq!(new.len(), 3); + assert_eq!(new[0].recovered_block().hash(), fork_block_3.recovered_block().hash()); + assert_eq!(new[1].recovered_block().hash(), fork_block_4.recovered_block().hash()); + assert_eq!(new[2].recovered_block().hash(), fork_block_5.recovered_block().hash()); + + assert_eq!(old.len(), 1); + assert_eq!(old[0].recovered_block().hash(), blocks[2].recovered_block().hash()); + } + + // The canonical block has not changed, so we will not get any active persistence action + test_harness.tree.advance_persistence().unwrap(); + let current_action = test_harness.tree.persistence_state.current_action().cloned(); + assert_eq!(current_action, None); + + // Let's change the canonical head and advance persistence + test_harness + .tree + .state + .tree_state + .set_canonical_head(fork_block_5.recovered_block().num_hash()); + + // The canonical block has changed now, we should get fork_block_4 due to the persistence + // threshold and in memory block buffer target + test_harness.tree.advance_persistence().unwrap(); + let current_action = test_harness.tree.persistence_state.current_action().cloned(); + assert_eq!( + current_action, + Some(CurrentPersistenceAction::SavingBlocks { + highest: fork_block_4.recovered_block().num_hash() + }) + ); +} + +#[test] +fn test_tree_state_on_new_head_deep_fork() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + let mut test_block_builder = TestBlockBuilder::eth(); + + let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect(); + + for block in &blocks { + test_harness.tree.state.tree_state.insert_executed(block.clone()); + } + + // set last block as the current canonical head + let last_block = blocks.last().unwrap().recovered_block().clone(); + + test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash()); + + // create a fork chain from last_block + let chain_a = test_block_builder.create_fork(&last_block, 10); + let chain_b = test_block_builder.create_fork(&last_block, 10); + + for block in &chain_a { + test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + }, + trie: ExecutedTrieUpdates::empty(), + }); + } + test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); + + for block in &chain_b { + test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + }, + trie: ExecutedTrieUpdates::empty(), + }); + } + + // for each block in chain_b, reorg to it and then back to canonical + let mut expected_new = Vec::new(); + for block in &chain_b { + // reorg to chain from block b + let result = test_harness.tree.on_new_head(block.hash()).unwrap(); + assert_matches!(result, Some(NewCanonicalChain::Reorg { .. 
})); + + expected_new.push(block); + if let Some(NewCanonicalChain::Reorg { new, old }) = result { + assert_eq!(new.len(), expected_new.len()); + for (index, block) in expected_new.iter().enumerate() { + assert_eq!(new[index].recovered_block().hash(), block.hash()); + } + + assert_eq!(old.len(), chain_a.len()); + for (index, block) in chain_a.iter().enumerate() { + assert_eq!(old[index].recovered_block().hash(), block.hash()); + } + } + + // set last block of chain a as canonical head + test_harness.tree.on_new_head(chain_a.last().unwrap().hash()).unwrap(); + } +} + +#[tokio::test] +async fn test_get_canonical_blocks_to_persist() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + let mut test_block_builder = TestBlockBuilder::eth(); + + let canonical_head_number = 9; + let blocks: Vec<_> = + test_block_builder.get_executed_blocks(0..canonical_head_number + 1).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let last_persisted_block_number = 3; + test_harness.tree.persistence_state.last_persisted_block = + blocks[last_persisted_block_number as usize].recovered_block.num_hash(); + + let persistence_threshold = 4; + let memory_block_buffer_target = 3; + test_harness.tree.config = TreeConfig::default() + .with_persistence_threshold(persistence_threshold) + .with_memory_block_buffer_target(memory_block_buffer_target); + + let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap(); + + let expected_blocks_to_persist_length: usize = + (canonical_head_number - memory_block_buffer_target - last_persisted_block_number) + .try_into() + .unwrap(); + + assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); + for (i, item) in blocks_to_persist.iter().enumerate().take(expected_blocks_to_persist_length) { + assert_eq!(item.recovered_block().number, last_persisted_block_number + i as u64 + 1); + } + + // make sure only canonical blocks are included + let fork_block = test_block_builder.get_executed_block_with_number(4, B256::random()); + let fork_block_hash = fork_block.recovered_block().hash(); + test_harness.tree.state.tree_state.insert_executed(fork_block); + + assert!(test_harness.tree.state.tree_state.block_by_hash(fork_block_hash).is_some()); + + let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap(); + assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); + + // check that the fork block is not included in the blocks to persist + assert!(!blocks_to_persist.iter().any(|b| b.recovered_block().hash() == fork_block_hash)); + + // check that the original block 4 is still included + assert!(blocks_to_persist.iter().any(|b| b.recovered_block().number == 4 && + b.recovered_block().hash() == blocks[4].recovered_block().hash())); + + // check that if we advance persistence, the persistence action is the correct value + test_harness.tree.advance_persistence().expect("advancing persistence should succeed"); + assert_eq!( + test_harness.tree.persistence_state.current_action().cloned(), + Some(CurrentPersistenceAction::SavingBlocks { + highest: blocks_to_persist.last().unwrap().recovered_block().num_hash() + }) + ); +} + +#[tokio::test] +async fn test_engine_tree_fcu_missing_head() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec.clone()); + + let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); + + let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect(); 
+ test_harness = test_harness.with_blocks(blocks); + + let missing_block = test_block_builder + .generate_random_block(6, test_harness.blocks.last().unwrap().recovered_block().hash()); + + test_harness.fcu_to(missing_block.hash(), PayloadStatusEnum::Syncing).await; + + // after FCU we receive an EngineApiEvent::Download event to get the missing block. + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::Download(DownloadRequest::BlockSet(actual_block_set)) => { + let expected_block_set = HashSet::from_iter([missing_block.hash()]); + assert_eq!(actual_block_set, expected_block_set); + } + _ => panic!("Unexpected event: {event:#?}"), + } +} + +#[tokio::test] +async fn test_engine_tree_live_sync_transition_required_blocks_requested() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec.clone()); + + let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); + test_harness = test_harness.with_blocks(base_chain.clone()); + + test_harness + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) + .await; + + // extend main chain with enough blocks to trigger pipeline run but don't insert them + let main_chain = test_harness + .block_builder + .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); + + let main_chain_last_hash = main_chain.last().unwrap().hash(); + test_harness.send_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await; + + test_harness.check_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await; + + // create event for backfill finished + let backfill_finished_block_number = MIN_BLOCKS_FOR_PIPELINE_RUN + 1; + let backfill_finished = FromOrchestrator::BackfillSyncFinished(ControlFlow::Continue { + block_number: backfill_finished_block_number, + }); + + let backfill_tip_block = main_chain[(backfill_finished_block_number - 1) as usize].clone(); + // add block to mock provider to enable persistence clean up. 
+ test_harness.provider.add_block(backfill_tip_block.hash(), backfill_tip_block.into_block()); + test_harness.tree.on_engine_message(FromEngine::Event(backfill_finished)).unwrap(); + + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { + assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash])); + } + _ => panic!("Unexpected event: {event:#?}"), + } + + test_harness + .tree + .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain.last().unwrap().clone()])) + .unwrap(); + + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::Download(DownloadRequest::BlockRange(initial_hash, total_blocks)) => { + assert_eq!( + total_blocks, + (main_chain.len() - backfill_finished_block_number as usize - 1) as u64 + ); + assert_eq!(initial_hash, main_chain.last().unwrap().parent_hash); + } + _ => panic!("Unexpected event: {event:#?}"), + } +} + +#[tokio::test] +async fn test_engine_tree_live_sync_transition_eventually_canonical() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec.clone()); + test_harness.tree.config = test_harness.tree.config.with_max_execute_block_batch_size(100); + + // create base chain and setup test harness with it + let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); + test_harness = test_harness.with_blocks(base_chain.clone()); + + // fcu to the tip of base chain + test_harness + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) + .await; + + // create main chain, extension of base chain, with enough blocks to + // trigger backfill sync + let main_chain = test_harness + .block_builder + .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); + + let main_chain_last = main_chain.last().unwrap(); + let main_chain_last_hash = main_chain_last.hash(); + let main_chain_backfill_target = main_chain.get(MIN_BLOCKS_FOR_PIPELINE_RUN as usize).unwrap(); + let main_chain_backfill_target_hash = main_chain_backfill_target.hash(); + + // fcu to the element of main chain that should trigger backfill sync + test_harness.send_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; + test_harness.check_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; + + // check download request for target + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { + assert_eq!(hash_set, HashSet::from_iter([main_chain_backfill_target_hash])); + } + _ => panic!("Unexpected event: {event:#?}"), + } + + // send message to tell the engine the requested block was downloaded + test_harness + .tree + .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain_backfill_target.clone()])) + .unwrap(); + + // check that backfill is triggered + let event = test_harness.from_tree_rx.recv().await.unwrap(); + match event { + EngineApiEvent::BackfillAction(BackfillAction::Start( + reth_stages::PipelineTarget::Sync(target_hash), + )) => { + assert_eq!(target_hash, main_chain_backfill_target_hash); + } + _ => panic!("Unexpected event: {event:#?}"), + } + + // persist blocks of main chain, same as the backfill operation would do + let backfilled_chain: Vec<_> = + main_chain.clone().drain(0..(MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize).collect(); + 
test_harness.persist_blocks(backfilled_chain.clone());
+
+    test_harness.setup_range_insertion_for_valid_chain(backfilled_chain);
+
+    // send message to mark backfill finished
+    test_harness
+        .tree
+        .on_engine_message(FromEngine::Event(FromOrchestrator::BackfillSyncFinished(
+            ControlFlow::Continue { block_number: main_chain_backfill_target.number },
+        )))
+        .unwrap();
+
+    // send fcu to the tip of main
+    test_harness.fcu_to(main_chain_last_hash, ForkchoiceStatus::Syncing).await;
+
+    let event = test_harness.from_tree_rx.recv().await.unwrap();
+    match event {
+        EngineApiEvent::Download(DownloadRequest::BlockSet(target_hash)) => {
+            assert_eq!(target_hash, HashSet::from_iter([main_chain_last_hash]));
+        }
+        _ => panic!("Unexpected event: {event:#?}"),
+    }
+
+    // tell engine main chain tip downloaded
+    test_harness
+        .tree
+        .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain_last.clone()]))
+        .unwrap();
+
+    // check download range request
+    let event = test_harness.from_tree_rx.recv().await.unwrap();
+    match event {
+        EngineApiEvent::Download(DownloadRequest::BlockRange(initial_hash, total_blocks)) => {
+            assert_eq!(
+                total_blocks,
+                (main_chain.len() - MIN_BLOCKS_FOR_PIPELINE_RUN as usize - 2) as u64
+            );
+            assert_eq!(initial_hash, main_chain_last.parent_hash);
+        }
+        _ => panic!("Unexpected event: {event:#?}"),
+    }
+
+    let remaining: Vec<_> = main_chain
+        .clone()
+        .drain((MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize..main_chain.len())
+        .collect();
+
+    test_harness.setup_range_insertion_for_valid_chain(remaining.clone());
+
+    // tell engine block range downloaded
+    test_harness.tree.on_engine_message(FromEngine::DownloadedBlocks(remaining.clone())).unwrap();
+
+    test_harness.check_canon_chain_insertion(remaining).await;
+
+    // check canonical chain committed event with the hash of the latest block
+    test_harness.check_canon_commit(main_chain_last_hash).await;
+
+    // new head is the tip of the main chain
+    test_harness.check_canon_head(main_chain_last_hash);
+}
+
+#[tokio::test]
+async fn test_engine_tree_live_sync_fcu_extends_canon_chain() {
+    reth_tracing::init_test_tracing();
+
+    let chain_spec = MAINNET.clone();
+    let mut test_harness = TestHarness::new(chain_spec.clone());
+
+    // create base chain and setup test harness with it
+    let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect();
+    test_harness = test_harness.with_blocks(base_chain.clone());
+
+    // fcu to the tip of base chain
+    test_harness
+        .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
+        .await;
+
+    // create main chain, extension of base chain
+    let main_chain = test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 10);
+    // determine target in the middle of the main chain
+    let target = main_chain.get(5).unwrap();
+    let target_hash = target.hash();
+    let main_last = main_chain.last().unwrap();
+    let main_last_hash = main_last.hash();
+
+    // insert main chain
+    test_harness.insert_chain(main_chain).await;
+
+    // send fcu to target
+    test_harness.send_fcu(target_hash, ForkchoiceStatus::Valid).await;
+
+    test_harness.check_canon_commit(target_hash).await;
+    test_harness.check_fcu(target_hash, ForkchoiceStatus::Valid).await;
+
+    // send fcu to main tip
+    test_harness.send_fcu(main_last_hash, ForkchoiceStatus::Valid).await;
+
+    test_harness.check_canon_commit(main_last_hash).await;
+    test_harness.check_fcu(main_last_hash, ForkchoiceStatus::Valid).await;
+    test_harness.check_canon_head(main_last_hash);
+}
+
+#[tokio::test]
+async fn
test_engine_tree_buffered_blocks_are_eventually_connected() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec.clone()); + + let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); + test_harness = test_harness.with_blocks(base_chain.clone()); + + // side chain consisting of two blocks, the last will be inserted first + // so that we force it to be buffered + let side_chain = + test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 2); + + // buffer last block of side chain + let buffered_block = side_chain.last().unwrap(); + let buffered_block_hash = buffered_block.hash(); + + test_harness.setup_range_insertion_for_valid_chain(vec![buffered_block.clone()]); + test_harness.send_new_payload(buffered_block.clone()).await; + + assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_some()); + + let non_buffered_block = side_chain.first().unwrap(); + let non_buffered_block_hash = non_buffered_block.hash(); + + // insert block that continues the canon chain, should not be buffered + test_harness.setup_range_insertion_for_valid_chain(vec![non_buffered_block.clone()]); + test_harness.send_new_payload(non_buffered_block.clone()).await; + assert!(test_harness.tree.state.buffer.block(&non_buffered_block_hash).is_none()); + + // the previously buffered block should be connected now + assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_none()); + + // both blocks are added to the canon chain in order + // note that the buffered block is received first, but added last + test_harness.check_block_received(buffered_block_hash).await; + test_harness.check_block_received(non_buffered_block_hash).await; + test_harness.check_canon_block_added(non_buffered_block_hash).await; + test_harness.check_canon_block_added(buffered_block_hash).await; +} + +#[tokio::test] +async fn test_engine_tree_reorg_with_missing_ancestor_expecting_valid() { + reth_tracing::init_test_tracing(); + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec.clone()); + + let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..6).collect(); + test_harness = test_harness.with_blocks(base_chain.clone()); + + // create a side chain with an invalid block + let side_chain = + test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 15); + let invalid_index = 9; + + test_harness.setup_range_insertion_for_invalid_chain(side_chain.clone(), invalid_index); + + for (index, block) in side_chain.iter().enumerate() { + test_harness.send_new_payload(block.clone()).await; + + if index < side_chain.len() - invalid_index - 1 { + test_harness.send_fcu(block.hash(), ForkchoiceStatus::Valid).await; + } + } + + // Try to do a forkchoice update to a block after the invalid one + let fork_tip_hash = side_chain.last().unwrap().hash(); + test_harness.send_fcu(fork_tip_hash, ForkchoiceStatus::Invalid).await; +} diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 1d67fc39c2b..752523c262f 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -1,4 +1,5 @@ -use alloy_primitives::{hex, hex::ToHexExt}; +use crate::BLOCKS_PER_FILE; +use alloy_primitives::{hex, hex::ToHexExt, BlockNumber}; use bytes::Bytes; use eyre::{eyre, OptionExt}; use futures_util::{stream::StreamExt, Stream, TryStreamExt}; @@ -41,6 +42,7 @@ pub struct EraClient { client: Http, url: Url, folder: Box, + start_from: 
Option<u64>,
 }
 
 impl<Http: HttpClient + Clone> EraClient<Http> {
@@ -48,7 +50,15 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
     /// Constructs [`EraClient`] using `client` to download from `url` into `folder`.
     pub const fn new(client: Http, url: Url, folder: Box<Path>) -> Self {
-        Self { client, url, folder }
+        Self { client, url, folder, start_from: None }
+    }
+
+    /// Overrides the starting ERA file based on `block_number`.
+    ///
+    /// The normal behavior is that the index is recovered based on files contained in the `folder`.
+    pub const fn start_from(mut self, block_number: BlockNumber) -> Self {
+        self.start_from.replace(block_number / BLOCKS_PER_FILE);
+        self
     }
 
     /// Performs a GET request on `url` and stores the response body into a file located within
@@ -67,16 +77,30 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
         let number =
             self.file_name_to_number(file_name).ok_or_eyre("Cannot parse number from file name")?;
 
-        let mut stream = client.get(url).await?;
-        let mut file = File::create(&path).await?;
-        let mut hasher = Sha256::new();
-        while let Some(item) = stream.next().await.transpose()? {
-            io::copy(&mut item.as_ref(), &mut file).await?;
-            hasher.update(item);
+        let mut tries = 1..3;
+        let mut actual_checksum: eyre::Result<_>;
+        loop {
+            actual_checksum = async {
+                let mut file = File::create(&path).await?;
+                let mut stream = client.get(url.clone()).await?;
+                let mut hasher = Sha256::new();
+
+                while let Some(item) = stream.next().await.transpose()? {
+                    io::copy(&mut item.as_ref(), &mut file).await?;
+                    hasher.update(item);
+                }
+
+                Ok(hasher.finalize().to_vec())
+            }
+            .await;
+
+            if actual_checksum.is_ok() || tries.next().is_none() {
+                break;
+            }
         }
-        let actual_checksum = hasher.finalize().to_vec();
+        let actual_checksum = actual_checksum?;
 
         let file = File::open(self.folder.join(Self::CHECKSUMS)).await?;
         let reader = io::BufReader::new(file);
@@ -102,6 +126,10 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
     /// Recovers index of file following the latest downloaded file from a different run.
     pub async fn recover_index(&self) -> u64 {
+        if let Some(block_number) = self.start_from {
+            return block_number;
+        }
+
         let mut max = None;
 
         if let Ok(mut dir) = fs::read_dir(&self.folder).await {
@@ -142,7 +170,7 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
     /// Fetches the list of ERA1 files from `url` and stores it in a file located within `folder`.
     pub async fn fetch_file_list(&self) -> eyre::Result<()> {
         let (mut index, mut checksums) = try_join!(
-            self.client.get(self.url.clone().join("index.html")?),
+            self.client.get(self.url.clone()),
             self.client.get(self.url.clone().join(Self::CHECKSUMS)?),
         )?;
diff --git a/crates/era-downloader/src/fs.rs b/crates/era-downloader/src/fs.rs
index 076c2f40f8d..2fe40e86e7d 100644
--- a/crates/era-downloader/src/fs.rs
+++ b/crates/era-downloader/src/fs.rs
@@ -1,5 +1,5 @@
-use crate::EraMeta;
-use alloy_primitives::{hex, hex::ToHexExt};
+use crate::{EraMeta, BLOCKS_PER_FILE};
+use alloy_primitives::{hex, hex::ToHexExt, BlockNumber};
 use eyre::{eyre, OptionExt};
 use futures_util::{stream, Stream};
 use reth_fs_util as fs;
@@ -9,6 +9,7 @@ use std::{fmt::Debug, io, io::BufRead, path::Path, str::FromStr};
 /// Creates a new ordered asynchronous [`Stream`] of ERA1 files read from `dir`.
 pub fn read_dir(
     dir: impl AsRef<Path> + Send + Sync + 'static,
+    start_from: BlockNumber,
 ) -> eyre::Result<impl Stream<Item = eyre::Result<EraLocalMeta>> + Send + Sync + 'static + Unpin> {
     let mut checksums = None;
     let mut entries = fs::read_dir(dir)?
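The download loop above wraps the GET-and-hash round trip in a bounded retry: the `1..3` counter yields two retries after the initial attempt, so a file is fetched at most three times before the error is surfaced. A minimal synchronous sketch of the same shape (`fetch` is a hypothetical stand-in for the client's GET-plus-checksum step):

```rust
/// Runs `fetch` up to three times, mirroring the `1..3` retry counter in the
/// diff above: one initial attempt plus two retries.
fn fetch_with_retries<T, E>(mut fetch: impl FnMut() -> Result<T, E>) -> Result<T, E> {
    let mut tries = 1..3; // yields 1, then 2, then stops
    loop {
        let result = fetch();
        // Stop on success, or once the retry counter is exhausted.
        if result.is_ok() || tries.next().is_none() {
            return result;
        }
    }
}

fn main() {
    let mut attempts = 0;
    // Fails twice, then succeeds on the third (and final) attempt.
    let result: Result<u32, &str> = fetch_with_retries(|| {
        attempts += 1;
        if attempts < 3 { Err("transient") } else { Ok(42) }
    });
    assert_eq!(attempts, 3);
    assert_eq!(result, Ok(42));
}
```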
@@ -44,27 +45,29 @@ pub fn read_dir( entries.sort_by(|(left, _), (right, _)| left.cmp(right)); - Ok(stream::iter(entries.into_iter().map(move |(_, path)| { - let expected_checksum = - checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; - let expected_checksum = hex::decode(expected_checksum)?; - - let mut hasher = Sha256::new(); - let mut reader = io::BufReader::new(fs::open(&path)?); - - io::copy(&mut reader, &mut hasher)?; - let actual_checksum = hasher.finalize().to_vec(); - - if actual_checksum != expected_checksum { - return Err(eyre!( - "Checksum mismatch, got: {}, expected: {}", - actual_checksum.encode_hex(), - expected_checksum.encode_hex() - )); - } - - Ok(EraLocalMeta::new(path)) - }))) + Ok(stream::iter(entries.into_iter().skip((start_from / BLOCKS_PER_FILE) as usize).map( + move |(_, path)| { + let expected_checksum = + checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; + let expected_checksum = hex::decode(expected_checksum)?; + + let mut hasher = Sha256::new(); + let mut reader = io::BufReader::new(fs::open(&path)?); + + io::copy(&mut reader, &mut hasher)?; + let actual_checksum = hasher.finalize().to_vec(); + + if actual_checksum != expected_checksum { + return Err(eyre!( + "Checksum mismatch, got: {}, expected: {}", + actual_checksum.encode_hex(), + expected_checksum.encode_hex() + )); + } + + Ok(EraLocalMeta::new(path)) + }, + ))) } /// Contains information about an ERA file that is on the local file-system and is read-only. @@ -93,7 +96,11 @@ impl AsRef for EraLocalMeta { impl EraMeta for EraLocalMeta { /// A no-op. - fn mark_as_processed(self) -> eyre::Result<()> { + fn mark_as_processed(&self) -> eyre::Result<()> { Ok(()) } + + fn path(&self) -> &Path { + &self.path + } } diff --git a/crates/era-downloader/src/lib.rs b/crates/era-downloader/src/lib.rs index 6aaec5ba0a1..01147e41e1c 100644 --- a/crates/era-downloader/src/lib.rs +++ b/crates/era-downloader/src/lib.rs @@ -41,3 +41,5 @@ mod stream; pub use client::{EraClient, HttpClient}; pub use fs::read_dir; pub use stream::{EraMeta, EraStream, EraStreamConfig}; + +pub(crate) const BLOCKS_PER_FILE: u64 = 8192; diff --git a/crates/era-downloader/src/stream.rs b/crates/era-downloader/src/stream.rs index 51278aa3b61..336085a2682 100644 --- a/crates/era-downloader/src/stream.rs +++ b/crates/era-downloader/src/stream.rs @@ -1,4 +1,5 @@ use crate::{client::HttpClient, EraClient}; +use alloy_primitives::BlockNumber; use futures_util::{stream::FuturesOrdered, FutureExt, Stream, StreamExt}; use reqwest::Url; use reth_fs_util as fs; @@ -23,11 +24,12 @@ use std::{ pub struct EraStreamConfig { max_files: usize, max_concurrent_downloads: usize, + start_from: Option, } impl Default for EraStreamConfig { fn default() -> Self { - Self { max_files: 5, max_concurrent_downloads: 3 } + Self { max_files: 5, max_concurrent_downloads: 3, start_from: None } } } @@ -43,6 +45,15 @@ impl EraStreamConfig { self.max_concurrent_downloads = max_concurrent_downloads; self } + + /// Overrides the starting ERA file index to be the first one that contains `block_number`. + /// + /// The normal behavior is that the ERA file index is recovered from the last file inside the + /// download folder. + pub const fn start_from(mut self, block_number: BlockNumber) -> Self { + self.start_from.replace(block_number); + self + } } /// An asynchronous stream of ERA1 files. 
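`EraClient::start_from` and the `skip` inside `read_dir` both reduce a block number to an ERA1 file index with the same integer division, using the `BLOCKS_PER_FILE = 8192` constant this diff adds to the crate. A quick sketch of that mapping:

```rust
/// Fixed number of blocks covered by each ERA1 file (added in this diff).
const BLOCKS_PER_FILE: u64 = 8192;

/// Index of the ERA1 file that contains `block_number`, mirroring the
/// `block_number / BLOCKS_PER_FILE` computation in `start_from` and `read_dir`.
const fn era_file_index(block_number: u64) -> u64 {
    block_number / BLOCKS_PER_FILE
}

fn main() {
    assert_eq!(era_file_index(0), 0); // genesis sits in file 0
    assert_eq!(era_file_index(8_191), 0); // last block of file 0
    assert_eq!(era_file_index(8_192), 1); // first block of file 1
    // Starting from block 20_000 therefore skips files 0 and 1.
    assert_eq!(era_file_index(20_000), 2);
}
```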
@@ -50,12 +61,13 @@ impl EraStreamConfig { /// # Examples /// ``` /// use futures_util::StreamExt; -/// use reth_era_downloader::{EraStream, HttpClient}; +/// use reth_era_downloader::{EraMeta, EraStream, HttpClient}; /// /// # async fn import(mut stream: EraStream) -> eyre::Result<()> { -/// while let Some(file) = stream.next().await { -/// let file = file?; -/// // Process `file: Box` +/// while let Some(meta) = stream.next().await { +/// let meta = meta?; +/// // Process file at `meta.path(): &Path` +/// meta.mark_as_processed()?; /// } /// # Ok(()) /// # } @@ -93,13 +105,28 @@ impl EraStream { } /// Contains information about an ERA file. -pub trait EraMeta: AsRef { +pub trait EraMeta: Debug { /// Marking this particular ERA file as "processed" lets the caller hint that it is no longer /// going to be using it. /// /// The meaning of that is up to the implementation. The caller should assume that after this /// point is no longer possible to safely read it. - fn mark_as_processed(self) -> eyre::Result<()>; + fn mark_as_processed(&self) -> eyre::Result<()>; + + /// A path to the era file. + /// + /// File should be openable and treated as read-only. + fn path(&self) -> &Path; +} + +impl EraMeta for Box { + fn mark_as_processed(&self) -> eyre::Result<()> { + T::mark_as_processed(self) + } + + fn path(&self) -> &Path { + T::path(self) + } } /// Contains information about ERA file that is hosted remotely and represented by a temporary @@ -123,8 +150,12 @@ impl AsRef for EraRemoteMeta { impl EraMeta for EraRemoteMeta { /// Removes a temporary local file representation of the remotely hosted original. - fn mark_as_processed(self) -> eyre::Result<()> { - Ok(fs::remove_file(self.path)?) + fn mark_as_processed(&self) -> eyre::Result<()> { + Ok(fs::remove_file(&self.path)?) 
+ } + + fn path(&self) -> &Path { + &self.path } } diff --git a/crates/era-downloader/tests/it/checksums.rs b/crates/era-downloader/tests/it/checksums.rs index 511fbc6b65e..70a78345dbd 100644 --- a/crates/era-downloader/tests/it/checksums.rs +++ b/crates/era-downloader/tests/it/checksums.rs @@ -9,7 +9,7 @@ use test_case::test_case; #[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")] #[test_case("https://era1.ethportal.net/"; "ethportal")] -#[test_case("https://era.ithaca.xyz/era1/"; "ithaca")] +#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] async fn test_invalid_checksum_returns_error(url: &str) { let base_url = Url::from_str(url).unwrap(); @@ -57,13 +57,13 @@ impl HttpClient for FailingClient { async move { match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/index.html" => { + "https://mainnet.era1.nimbus.team/" => { Ok(Box::new(futures::stream::once(Box::pin(async move { Ok(bytes::Bytes::from(crate::NIMBUS)) }))) as Box> + Send + Sync + Unpin>) } - "https://era1.ethportal.net/index.html" => { + "https://era1.ethportal.net/" => { Ok(Box::new(futures::stream::once(Box::pin(async move { Ok(bytes::Bytes::from(crate::ETH_PORTAL)) }))) diff --git a/crates/era-downloader/tests/it/download.rs b/crates/era-downloader/tests/it/download.rs index dba658b4eb1..5502874fc1f 100644 --- a/crates/era-downloader/tests/it/download.rs +++ b/crates/era-downloader/tests/it/download.rs @@ -8,17 +8,17 @@ use test_case::test_case; #[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")] #[test_case("https://era1.ethportal.net/"; "ethportal")] -#[test_case("https://era.ithaca.xyz/era1/"; "ithaca")] +#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] async fn test_getting_file_url_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); let folder = folder.path().to_owned().into_boxed_path(); - let client = EraClient::new(StubClient, base_url, folder); + let client = EraClient::new(StubClient, base_url.clone(), folder); client.fetch_file_list().await.unwrap(); - let expected_url = Some(Url::from_str(&format!("{url}mainnet-00000-5ec1ffb8.era1")).unwrap()); + let expected_url = Some(base_url.join("mainnet-00000-5ec1ffb8.era1").unwrap()); let actual_url = client.url(0).await.unwrap(); assert_eq!(actual_url, expected_url); @@ -26,7 +26,7 @@ async fn test_getting_file_url_after_fetching_file_list(url: &str) { #[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")] #[test_case("https://era1.ethportal.net/"; "ethportal")] -#[test_case("https://era.ithaca.xyz/era1/"; "ithaca")] +#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] async fn test_getting_file_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); diff --git a/crates/era-downloader/tests/it/fs.rs b/crates/era-downloader/tests/it/fs.rs index 5ad7ba28007..00a36124745 100644 --- a/crates/era-downloader/tests/it/fs.rs +++ b/crates/era-downloader/tests/it/fs.rs @@ -70,7 +70,7 @@ async fn test_streaming_from_local_directory( fs::write(folder.join("mainnet-00001-a5364e9a.era1"), CONTENTS_1).await.unwrap(); let folder = folder.into_boxed_path(); - let actual = read_dir(folder.clone()); + let actual = read_dir(folder.clone(), 0); match checksums { Ok(_) => match actual { diff --git a/crates/era-downloader/tests/it/list.rs b/crates/era-downloader/tests/it/list.rs index cfb0e3b8b42..adc0df7e1cb 100644 --- a/crates/era-downloader/tests/it/list.rs +++ 
b/crates/era-downloader/tests/it/list.rs @@ -8,7 +8,7 @@ use test_case::test_case; #[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")] #[test_case("https://era1.ethportal.net/"; "ethportal")] -#[test_case("https://era.ithaca.xyz/era1/"; "ithaca")] +#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] async fn test_getting_file_name_after_fetching_file_list(url: &str) { let url = Url::from_str(url).unwrap(); diff --git a/crates/era-downloader/tests/it/main.rs b/crates/era-downloader/tests/it/main.rs index f40e5ddb30a..26ba4e6143e 100644 --- a/crates/era-downloader/tests/it/main.rs +++ b/crates/era-downloader/tests/it/main.rs @@ -38,13 +38,13 @@ impl HttpClient for StubClient { async move { match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/index.html" => { + "https://mainnet.era1.nimbus.team/" => { Ok(Box::new(futures::stream::once(Box::pin(async move { Ok(bytes::Bytes::from(NIMBUS)) }))) as Box> + Send + Sync + Unpin>) } - "https://era1.ethportal.net/index.html" => { + "https://era1.ethportal.net/" => { Ok(Box::new(futures::stream::once(Box::pin(async move { Ok(bytes::Bytes::from(ETH_PORTAL)) }))) diff --git a/crates/era-downloader/tests/it/stream.rs b/crates/era-downloader/tests/it/stream.rs index 24fb43b7250..5c7b812b9d7 100644 --- a/crates/era-downloader/tests/it/stream.rs +++ b/crates/era-downloader/tests/it/stream.rs @@ -9,7 +9,7 @@ use test_case::test_case; #[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")] #[test_case("https://era1.ethportal.net/"; "ethportal")] -#[test_case("https://era.ithaca.xyz/era1/"; "ithaca")] +#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] async fn test_streaming_files_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 58637286bf9..029b310b820 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{BlockHash, BlockNumber}; +use alloy_primitives::{BlockHash, BlockNumber, U256}; use futures_util::{Stream, StreamExt}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -7,16 +7,29 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, RawKey, RawTable, RawValue, }; -use reth_era::{era1_file::Era1Reader, execution_types::DecodeCompressed}; +use reth_era::{ + e2s_types::E2sError, + era1_file::{BlockTupleIterator, Era1Reader}, + execution_types::{BlockTuple, DecodeCompressed}, +}; use reth_era_downloader::EraMeta; use reth_etl::Collector; use reth_fs_util as fs; use reth_primitives_traits::{Block, FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - BlockWriter, ProviderError, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, + providers::StaticFileProviderRWRefMut, BlockWriter, ProviderError, StaticFileProviderFactory, + StaticFileSegment, StaticFileWriter, }; use reth_storage_api::{DBProvider, HeaderProvider, NodePrimitivesProvider, StorageLocation}; -use std::sync::mpsc; +use std::{ + collections::Bound, + error::Error, + fmt::{Display, Formatter}, + io::{Read, Seek}, + iter::Map, + ops::RangeBounds, + sync::mpsc, +}; use tracing::info; /// Imports blocks from `downloader` using `provider`. @@ -25,7 +38,7 @@ use tracing::info; pub fn import( mut downloader: Downloader, provider: &P, - mut hash_collector: Collector, + hash_collector: &mut Collector, ) -> eyre::Result where B: Block
<Header = BH, Body = BB>,
@@ -67,44 +80,216 @@ where
     let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
 
     while let Some(meta) = rx.recv()? {
-        let meta = meta?;
-        let file = fs::open(meta.as_ref())?;
-        let mut reader = Era1Reader::new(file);
-
-        for block in reader.iter() {
-            let block = block?;
-            let header: BH = block.header.decode()?;
-            let body: BB = block.body.decode()?;
-            let number = header.number();
-
-            if number == 0 {
-                continue;
-            }
+        last_header_number =
+            process(&meta?, &mut writer, provider, hash_collector, &mut td, last_header_number..)?;
+    }
+
+    build_index(provider, hash_collector)?;
+
+    Ok(last_header_number)
+}
+
+/// Extracts block headers and bodies from `meta` and appends them using `writer` and `provider`.
+///
+/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`.
+///
+/// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the
+/// [`end_bound`] or the end of the file.
+///
+/// Returns last block height.
+///
+/// [`start_bound`]: RangeBounds::start_bound
+/// [`end_bound`]: RangeBounds::end_bound
+pub fn process<Era, B, BH, BB, P>(
+    meta: &Era,
+    writer: &mut StaticFileProviderRWRefMut<'_,
<P as NodePrimitivesProvider>::Primitives>,
+    provider: &P,
+    hash_collector: &mut Collector<BlockHash, BlockNumber>,
+    total_difficulty: &mut U256,
+    block_numbers: impl RangeBounds<BlockNumber>,
+) -> eyre::Result<BlockNumber>
+where
+    B: Block<Header = BH, Body = BB>
, + BH: FullBlockHeader + Value, + BB: FullBlockBody< + Transaction = <
<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
+        OmmerHeader = BH,
+    >,
+    Era: EraMeta + ?Sized,
+    P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory + BlockWriter<Block = B>,
+    <P as NodePrimitivesProvider>
::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>,
+{
+    let reader = open(meta)?;
+    let iter =
+        reader
+            .iter()
+            .map(Box::new(decode)
+                as Box<dyn Fn(Result<BlockTuple, E2sError>) -> eyre::Result<(BH, BB)>>);
+    let iter = ProcessIter { iter, era: meta };
+
+    process_iter(iter, writer, provider, hash_collector, total_difficulty, block_numbers)
+}
+
+type ProcessInnerIter<R, BH, BB> =
+    Map<BlockTupleIterator<R>, Box<dyn Fn(Result<BlockTuple, E2sError>) -> eyre::Result<(BH, BB)>>>;
+
+/// An iterator that wraps era file extraction. After the final item [`EraMeta::mark_as_processed`]
+/// is called to ensure proper cleanup.
+#[derive(Debug)]
+pub struct ProcessIter<'a, Era: ?Sized, R: Read, BH, BB>
+where
+    BH: FullBlockHeader + Value,
+    BB: FullBlockBody,
+{
+    iter: ProcessInnerIter<R, BH, BB>,
+    era: &'a Era,
+}
+
+impl<'a, Era: EraMeta + ?Sized, R: Read, BH, BB> Display for ProcessIter<'a, Era, R, BH, BB>
+where
+    BH: FullBlockHeader + Value,
+    BB: FullBlockBody,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(&self.era.path().to_string_lossy(), f)
+    }
+}
+
+impl<'a, Era, R, BH, BB> Iterator for ProcessIter<'a, Era, R, BH, BB>
+where
+    R: Read + Seek,
+    Era: EraMeta + ?Sized,
+    BH: FullBlockHeader + Value,
+    BB: FullBlockBody,
+{
+    type Item = eyre::Result<(BH, BB)>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.iter.next() {
+            Some(item) => Some(item),
+            None => match self.era.mark_as_processed() {
+                Ok(..) => None,
+                Err(e) => Some(Err(e)),
+            },
+        }
+    }
+}
+
+/// Opens the era file described by `meta`.
+pub fn open<Era>(meta: &Era) -> eyre::Result<Era1Reader<std::fs::File>>
+where
+    Era: EraMeta + ?Sized,
+{
+    let file = fs::open(meta.path())?;
+    let reader = Era1Reader::new(file);
+
+    Ok(reader)
+}
 
-        let hash = header.hash_slow();
-        last_header_number = number;
 
+/// Extracts a pair of [`FullBlockHeader`] and [`FullBlockBody`] from [`BlockTuple`].
+pub fn decode<E, BH, BB>(block: Result<BlockTuple, E>) -> eyre::Result<(BH, BB)>
+where
+    BH: FullBlockHeader + Value,
+    BB: FullBlockBody,
+    E: From<E2sError> + Error + Send + Sync + 'static,
+{
+    let block = block?;
+    let header: BH = block.header.decode()?;
+    let body: BB = block.body.decode()?;
 
-        // Increase total difficulty
-        td += header.difficulty();
+    Ok((header, body))
+}
 
-        // Append to Headers segment
-        writer.append_header(&header, td, &hash)?;
+/// Extracts block headers and bodies from `iter` and appends them using `writer` and `provider`.
+///
+/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`.
+///
+/// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the
+/// [`end_bound`] or the end of the file.
+///
+/// Returns last block height.
+///
+/// [`start_bound`]: RangeBounds::start_bound
+/// [`end_bound`]: RangeBounds::end_bound
+pub fn process_iter<B, BH, BB, P>(
+    mut iter: impl Iterator<Item = eyre::Result<(BH, BB)>>,
+    writer: &mut StaticFileProviderRWRefMut<'_,
<P as NodePrimitivesProvider>::Primitives>,
+    provider: &P,
+    hash_collector: &mut Collector<BlockHash, BlockNumber>,
+    total_difficulty: &mut U256,
+    block_numbers: impl RangeBounds<BlockNumber>,
+) -> eyre::Result<BlockNumber>
+where
+    B: Block<Header = BH, Body = BB>
, + BH: FullBlockHeader + Value, + BB: FullBlockBody< + Transaction = <
<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
+        OmmerHeader = BH,
+    >,
+    P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory + BlockWriter<Block = B>,
+    <P as NodePrimitivesProvider>
::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>,
+{
+    let mut last_header_number = match block_numbers.start_bound() {
+        Bound::Included(&number) => number,
+        Bound::Excluded(&number) => number.saturating_sub(1),
+        Bound::Unbounded => 0,
+    };
+    let target = match block_numbers.end_bound() {
+        Bound::Included(&number) => Some(number),
+        Bound::Excluded(&number) => Some(number.saturating_add(1)),
+        Bound::Unbounded => None,
+    };
 
-        // Write bodies to database.
-        provider.append_block_bodies(
-            vec![(header.number(), Some(body))],
-            // We are writing transactions directly to static files.
-            StorageLocation::StaticFiles,
-        )?;
+    for block in &mut iter {
+        let (header, body) = block?;
+        let number = header.number();
 
-            hash_collector.insert(hash, number)?;
+        if number <= last_header_number {
+            continue;
         }
+        if let Some(target) = target {
+            if number > target {
+                break;
+            }
+        }
+
+        let hash = header.hash_slow();
+        last_header_number = number;
 
-        info!(target: "era::history::import", "Processed {}", meta.as_ref().to_string_lossy());
+        // Increase total difficulty
+        *total_difficulty += header.difficulty();
 
-        meta.mark_as_processed()?;
+        // Append to Headers segment
+        writer.append_header(&header, *total_difficulty, &hash)?;
+
+        // Write bodies to database.
+        provider.append_block_bodies(
+            vec![(header.number(), Some(body))],
+            // We are writing transactions directly to static files.
+            StorageLocation::StaticFiles,
+        )?;
+
+        hash_collector.insert(hash, number)?;
     }
 
+    Ok(last_header_number)
+}
+
+/// Dumps the contents of `hash_collector` into [`tables::HeaderNumbers`].
+pub fn build_index<B, BH, BB, P>(
+    provider: &P,
+    hash_collector: &mut Collector<BlockHash, BlockNumber>,
+) -> eyre::Result<()>
+where
+    B: Block<Header = BH, Body = BB>
, + BH: FullBlockHeader + Value, + BB: FullBlockBody< + Transaction = <
<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
+        OmmerHeader = BH,
+    >,
+    P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory + BlockWriter<Block = B>,
+    <P as NodePrimitivesProvider>
::Primitives: NodePrimitives, +{ let total_headers = hash_collector.len(); info!(target: "era::history::import", total = total_headers, "Writing headers hash index"); @@ -125,13 +310,13 @@ where } } - let interval = (total_headers / 10).max(1); + let interval = (total_headers / 10).max(8192); // Build block hash to block number index for (index, hash_to_number) in hash_collector.iter()?.enumerate() { let (hash, number) = hash_to_number?; - if index > 0 && index % interval == 0 && total_headers > 100 { + if index != 0 && index % interval == 0 { info!(target: "era::history::import", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers hash index"); } @@ -145,5 +330,5 @@ where } } - Ok(last_header_number) + Ok(()) } diff --git a/crates/era-utils/src/lib.rs b/crates/era-utils/src/lib.rs index b72f0eb0c0c..ce3e70246e7 100644 --- a/crates/era-utils/src/lib.rs +++ b/crates/era-utils/src/lib.rs @@ -5,4 +5,4 @@ mod history; /// Imports history from ERA files. -pub use history::import; +pub use history::{build_index, decode, import, open, process, process_iter, ProcessIter}; diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 5dcf91f1c6b..d3d447615b9 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -11,7 +11,7 @@ use tempfile::tempdir; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_history_imports_from_fresh_state_successfully() { // URL where the ERA1 files are hosted - let url = Url::from_str("https://era.ithaca.xyz/era1/").unwrap(); + let url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap(); // Directory where the ERA1 files will be downloaded to let folder = tempdir().unwrap(); @@ -28,11 +28,11 @@ async fn test_history_imports_from_fresh_state_successfully() { let folder = tempdir().unwrap(); let folder = Some(folder.path().to_owned()); - let hash_collector = Collector::new(4096, folder); + let mut hash_collector = Collector::new(4096, folder); let expected_block_number = 8191; let actual_block_number = - reth_era_utils::import(stream, &pf.provider_rw().unwrap().0, hash_collector).unwrap(); + reth_era_utils::import(stream, &pf.provider_rw().unwrap().0, &mut hash_collector).unwrap(); assert_eq!(actual_block_number, expected_block_number); } diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1_file.rs index 7f3b558ca8b..547d770f06d 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1_file.rs @@ -67,8 +67,8 @@ pub struct Era1Reader { /// An iterator of [`BlockTuple`] streaming from [`E2StoreReader`]. 
#[derive(Debug)] -pub struct BlockTupleIterator<'r, R: Read> { - reader: &'r mut E2StoreReader, +pub struct BlockTupleIterator { + reader: E2StoreReader, headers: VecDeque, bodies: VecDeque, receipts: VecDeque, @@ -78,8 +78,8 @@ pub struct BlockTupleIterator<'r, R: Read> { block_index: Option, } -impl<'r, R: Read> BlockTupleIterator<'r, R> { - fn new(reader: &'r mut E2StoreReader) -> Self { +impl BlockTupleIterator { + fn new(reader: E2StoreReader) -> Self { Self { reader, headers: Default::default(), @@ -93,7 +93,7 @@ impl<'r, R: Read> BlockTupleIterator<'r, R> { } } -impl<'r, R: Read + Seek> Iterator for BlockTupleIterator<'r, R> { +impl Iterator for BlockTupleIterator { type Item = Result; fn next(&mut self) -> Option { @@ -101,7 +101,7 @@ impl<'r, R: Read + Seek> Iterator for BlockTupleIterator<'r, R> { } } -impl<'r, R: Read + Seek> BlockTupleIterator<'r, R> { +impl BlockTupleIterator { fn next_result(&mut self) -> Result, E2sError> { loop { let Some(entry) = self.reader.read_next_entry()? else { @@ -161,13 +161,13 @@ impl Era1Reader { } /// Returns an iterator of [`BlockTuple`] streaming from `reader`. - pub fn iter(&mut self) -> BlockTupleIterator<'_, R> { - BlockTupleIterator::new(&mut self.reader) + pub fn iter(self) -> BlockTupleIterator { + BlockTupleIterator::new(self.reader) } /// Reads and parses an Era1 file from the underlying reader, assembling all components /// into a complete [`Era1File`] with an [`Era1Id`] that includes the provided network name. - pub fn read(&mut self, network_name: String) -> Result { + pub fn read(mut self, network_name: String) -> Result { // Validate version entry let _version_entry = match self.reader.read_version()? { Some(entry) if entry.is_version() => entry, @@ -230,7 +230,7 @@ impl Era1Reader { network_name: impl Into, ) -> Result { let file = File::open(path).map_err(E2sError::Io)?; - let mut reader = Self::new(file); + let reader = Self::new(file); reader.read(network_name.into()) } } @@ -468,7 +468,7 @@ mod tests { } // Read back from memory buffer - let mut reader = Era1Reader::new(Cursor::new(&buffer)); + let reader = Era1Reader::new(Cursor::new(&buffer)); let read_era1 = reader.read("testnet".to_string())?; // Verify core properties diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs index 73d5c9e9b96..7aa0afb6e20 100644 --- a/crates/era/tests/it/dd.rs +++ b/crates/era/tests/it/dd.rs @@ -97,7 +97,7 @@ async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Resul } // Read back from buffer - let mut reader = Era1Reader::new(Cursor::new(&buffer)); + let reader = Era1Reader::new(Cursor::new(&buffer)); let read_back_file = reader.read(file.id.network_name.clone())?; // Verify basic properties are preserved diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index ca95b38b541..e27e25e1658 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -32,7 +32,7 @@ const fn main() {} const MAINNET: &str = "mainnet"; /// Default mainnet url /// for downloading mainnet `.era1` files -const MAINNET_URL: &str = "https://era.ithaca.xyz/era1/"; +const MAINNET_URL: &str = "https://era.ithaca.xyz/era1/index.html"; /// Succinct list of mainnet files we want to download /// from diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index 3ff83001d9a..a444fe9c570 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -46,7 +46,7 @@ async fn test_file_roundtrip( } // Read back from buffer - let mut reader = 
Era1Reader::new(Cursor::new(&buffer)); + let reader = Era1Reader::new(Cursor::new(&buffer)); let roundtrip_file = reader.read(network.to_string())?; assert_eq!( @@ -203,7 +203,7 @@ async fn test_file_roundtrip( writer.write_era1_file(&new_file)?; } - let mut reader = Era1Reader::new(Cursor::new(&recompressed_buffer)); + let reader = Era1Reader::new(Cursor::new(&recompressed_buffer)); let recompressed_file = reader.read(network.to_string())?; let recompressed_first_block = &recompressed_file.group.blocks[0]; diff --git a/crates/ethereum/cli/src/debug_cmd/execution.rs b/crates/ethereum/cli/src/debug_cmd/execution.rs index 99b27137fa3..63a9cc3a80e 100644 --- a/crates/ethereum/cli/src/debug_cmd/execution.rs +++ b/crates/ethereum/cli/src/debug_cmd/execution.rs @@ -24,7 +24,7 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider}; +use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; use reth_node_events::node::NodeEvent; use reth_provider::{ providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, @@ -86,7 +86,7 @@ impl> Command { let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); + let executor = EthEvmConfig::ethereum(provider_factory.chain_spec()); let pipeline = Pipeline::::builder() .with_tip_sender(tip_tx) @@ -100,6 +100,7 @@ impl> Command { executor.clone(), stage_conf.clone(), prune_modes, + None, ) .set(ExecutionStage::new( executor, diff --git a/crates/ethereum/cli/src/debug_cmd/merkle.rs b/crates/ethereum/cli/src/debug_cmd/merkle.rs index b9d58f81676..09c435b6f36 100644 --- a/crates/ethereum/cli/src/debug_cmd/merkle.rs +++ b/crates/ethereum/cli/src/debug_cmd/merkle.rs @@ -18,7 +18,7 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider}; +use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; use reth_provider::{ providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, @@ -109,7 +109,7 @@ impl> Command { ) .await?; - let executor_provider = EthExecutorProvider::ethereum(provider_factory.chain_spec()); + let executor_provider = EthEvmConfig::ethereum(provider_factory.chain_spec()); // Initialize the fetch client info!(target: "reth::cli", target_block_number = self.to, "Downloading tip of block range"); diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index ea4858801d2..46fb8720238 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -18,7 +18,7 @@ use reth_node_core::{ args::LogArgs, version::{LONG_VERSION, SHORT_VERSION}, }; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNode}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_tracing::FileWorkerGuard; use 
std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -149,7 +149,7 @@ impl, Ext: clap::Args + fmt::Debug> Cl let _ = install_prometheus_recorder(); let components = |spec: Arc| { - (EthExecutorProvider::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) + (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) }; match self.command { Commands::Node(command) => runner.run_command_until_exit(|ctx| { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs deleted file mode 100644 index 072f314ce7a..00000000000 --- a/crates/ethereum/evm/src/execute.rs +++ /dev/null @@ -1,863 +0,0 @@ -//! Ethereum block execution strategy. - -/// Helper type with backwards compatible methods to obtain Ethereum executor -/// providers. -pub type EthExecutorProvider = crate::EthEvmConfig; - -#[cfg(test)] -mod tests { - use crate::EthEvmConfig; - use alloy_consensus::{constants::ETH_TO_WEI, Header, TxLegacy}; - use alloy_eips::{ - eip2935::{HISTORY_SERVE_WINDOW, HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, - eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, - eip4895::Withdrawal, - eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, - eip7685::EMPTY_REQUESTS_HASH, - }; - use alloy_evm::block::BlockValidationError; - use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256, U256}; - use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, ForkCondition, MAINNET}; - use reth_ethereum_primitives::{Block, BlockBody, Transaction}; - use reth_evm::{execute::Executor, ConfigureEvm}; - use reth_execution_types::BlockExecutionResult; - use reth_primitives_traits::{ - crypto::secp256k1::public_key_to_address, Block as _, RecoveredBlock, - }; - use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm::{ - database::{CacheDB, EmptyDB, TransitionState}, - primitives::address, - state::{AccountInfo, Bytecode, EvmState}, - Database, - }; - use std::sync::{mpsc, Arc}; - - fn create_database_with_beacon_root_contract() -> CacheDB { - let mut db = CacheDB::new(Default::default()); - - let beacon_root_contract_account = AccountInfo { - balance: U256::ZERO, - code_hash: keccak256(BEACON_ROOTS_CODE.clone()), - nonce: 1, - code: Some(Bytecode::new_raw(BEACON_ROOTS_CODE.clone())), - }; - - db.insert_account_info(BEACON_ROOTS_ADDRESS, beacon_root_contract_account); - - db - } - - fn create_database_with_withdrawal_requests_contract() -> CacheDB { - let mut db = CacheDB::new(Default::default()); - - let withdrawal_requests_contract_account = AccountInfo { - nonce: 1, - balance: U256::ZERO, - code_hash: keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), - code: Some(Bytecode::new_raw(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), - }; - - db.insert_account_info( - WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, - withdrawal_requests_contract_account, - ); - - db - } - - fn evm_config(chain_spec: Arc) -> EthEvmConfig { - EthEvmConfig::new(chain_spec) - } - - #[test] - fn eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_database_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = evm_config(chain_spec); - - let mut executor = provider.batch_executor(db); - - // attempt to execute a block without parent beacon block root, expect 
err - let err = executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { - header: header.clone(), - body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, - }, - vec![], - )) - .expect_err( - "Executing cancun block without parent beacon block root field should fail", - ); - - assert!(matches!( - err.as_validation().unwrap(), - BlockValidationError::MissingParentBeaconBlockRoot - )); - - // fix header, set a gas limit - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { - header: header.clone(), - body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, - }, - vec![], - )) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist") - }); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = CacheDB::new(EmptyDB::default()); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = evm_config(chain_spec); - - // attempt to execute an empty block with parent beacon block root, this should not fail - provider - .batch_executor(db) - .execute_one(&RecoveredBlock::new_unhashed( - Block { - header, - body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, - }, - vec![], - )) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // // during the pre-block call - - let mut db = create_database_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account_info(SYSTEM_ADDRESS, Default::default()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = evm_config(chain_spec); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - 
parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let mut executor = provider.batch_executor(db); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { - header, - body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, - }, - vec![], - )) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = - executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_database_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header().clone(); - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header: header.clone(), body: Default::default() }, - vec![], - )) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field - should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let transition_state = executor.with_state_mut(|state| { - state - .transition_state - .take() - .expect("the evm should be initialized with bundle updates") - }); - - // assert that it is the default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. 
- let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_database_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = evm_config(chain_spec); - - // execute header - let mut executor = provider.batch_executor(db); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header: header.clone(), body: Default::default() }, - vec![], - )) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() - }); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - /// Create a state provider with blockhashes and the EIP-2935 system contract. 
- fn create_database_with_block_hashes(latest_block: u64) -> CacheDB { - let mut db = CacheDB::new(Default::default()); - for block_number in 0..=latest_block { - db.cache - .block_hashes - .insert(U256::from(block_number), keccak256(block_number.to_string())); - } - - let blockhashes_contract_account = AccountInfo { - balance: U256::ZERO, - code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), - code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), - nonce: 1, - }; - - db.insert_account_info(HISTORY_STORAGE_ADDRESS, blockhashes_contract_account); - - db - } - #[test] - fn eip_2935_pre_fork() { - let db = create_database_with_block_hashes(1); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Never) - .build(), - ); - - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - // construct the header for block one - let header = Header { timestamp: 1, number: 1, ..Header::default() }; - - // attempt to execute an empty block, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since this is before the fork - // was activated - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| { - state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() - })); - } - - #[test] - fn eip_2935_fork_activation_genesis() { - let db = create_database_with_block_hashes(0); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .prague_activated() - .build(), - ); - - let header = chain_spec.genesis_header().clone(); - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - // attempt to execute genesis block, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since there are no blocks - // preceding genesis - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| { - state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() - })); - } - - #[test] - fn eip_2935_fork_activation_within_window_bounds() { - let fork_activation_block = (HISTORY_SERVE_WINDOW - 10) as u64; - let db = create_database_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_hash: Some(EMPTY_REQUESTS_HASH), - excess_blob_gas: Some(0), - parent_beacon_block_root: Some(B256::random()), - ..Header::default() - }; - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - // attempt 
to execute the fork activation block, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap()), - U256::ZERO - ); - - // the hash of the block itself should not be in storage - assert!(executor.with_state_mut(|state| { - state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) - .unwrap() - .is_zero() - })); - } - - // - #[test] - fn eip_2935_fork_activation_outside_window_bounds() { - let fork_activation_block = (HISTORY_SERVE_WINDOW + 256) as u64; - let db = create_database_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_hash: Some(EMPTY_REQUESTS_HASH), - excess_blob_gas: Some(0), - parent_beacon_block_root: Some(B256::random()), - ..Header::default() - }; - - // attempt to execute the fork activation block, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - } - - #[test] - fn eip_2935_state_transition_inside_fork() { - let db = create_database_with_block_hashes(2); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .prague_activated() - .build(), - ); - - let header = chain_spec.genesis_header().clone(); - let header_hash = header.hash_slow(); - - let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(db); - - // attempt to execute the genesis block, this should not fail - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // nothing should be written as the genesis has no ancestors - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| { - state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() - })); - - // attempt to execute block 1, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 1, - requests_hash: Some(EMPTY_REQUESTS_HASH), - excess_blob_gas: Some(0), - parent_beacon_block_root: Some(B256::random()), - ..Header::default() - }; - let header_hash = header.hash_slow(); - - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { 
header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis should now be in storage, but not block 1 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| { - state.storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap().is_zero() - })); - - // attempt to execute block 2, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 2, - requests_hash: Some(EMPTY_REQUESTS_HASH), - excess_blob_gas: Some(0), - parent_beacon_block_root: Some(B256::random()), - ..Header::default() - }; - - executor - .execute_one(&RecoveredBlock::new_unhashed( - Block { header, body: Default::default() }, - vec![], - )) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| { - state.storage(HISTORY_STORAGE_ADDRESS, U256::from(2)).unwrap().is_zero() - })); - } - - #[test] - fn eip_7002() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .prague_activated() - .build(), - ); - - let mut db = create_database_with_withdrawal_requests_contract(); - - let sender_key_pair = generators::generate_key(&mut generators::rng()); - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - db.insert_account_info( - sender_address, - AccountInfo { nonce: 1, balance: U256::from(ETH_TO_WEI), ..Default::default() }, - ); - - // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 - let validator_public_key = fixed_bytes!( - "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" - ); - let withdrawal_amount = fixed_bytes!("0203040506070809"); - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - assert_eq!(input.len(), 56); - - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - // measured - header.gas_used = 135_856; - header.receipts_root = - b256!("0xb31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - let tx = sign_tx_with_key_pair( - sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: header.gas_used, - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(2), - input, - }), - ); - - let provider = evm_config(chain_spec); - - let mut executor = provider.batch_executor(db); - - let BlockExecutionResult { receipts, requests, .. 
} = executor - .execute_one( - &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } - .try_into_recovered() - .unwrap(), - ) - .unwrap(); - - let receipt = receipts.first().unwrap(); - assert!(receipt.success); - - // There should be exactly one entry with withdrawal requests - assert_eq!(requests.len(), 1); - assert_eq!(requests[0][0], 1); - } - - #[test] - fn block_gas_limit_error() { - // Create a chain specification with fork conditions set for Prague - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - // Create a state provider with the withdrawal requests contract pre-deployed - let mut db = create_database_with_withdrawal_requests_contract(); - - // Generate a new key pair for the sender - let sender_key_pair = generators::generate_key(&mut generators::rng()); - // Get the sender's address from the public key - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - // Insert the sender account into the state with a nonce of 1 and a balance of 1 ETH in Wei - db.insert_account_info( - sender_address, - AccountInfo { nonce: 1, balance: U256::from(ETH_TO_WEI), ..Default::default() }, - ); - - // Define the validator public key and withdrawal amount as fixed bytes - let validator_public_key = fixed_bytes!( - "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" - ); - let withdrawal_amount = fixed_bytes!("2222222222222222"); - // Concatenate the validator public key and withdrawal amount into a single byte array - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - // Ensure the input length is 56 bytes - assert_eq!(input.len(), 56); - - // Create a genesis block header with a specified gas limit and gas used - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - header.gas_used = 134_807; - header.receipts_root = - b256!("0xb31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - // Create a transaction with a gas limit higher than the block gas limit - let tx = sign_tx_with_key_pair( - sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 2_500_000, // higher than block gas limit - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - value: U256::from(1), - input, - }), - ); - - // Create an executor from the state provider - let evm_config = evm_config(chain_spec); - let mut executor = evm_config.batch_executor(db); - - // Execute the block and capture the result - let exec_result = executor.execute_one( - &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } - .try_into_recovered() - .unwrap(), - ); - - // Check if the execution result is an error and assert the specific error type - match exec_result { - Ok(_) => panic!("Expected block gas limit error"), - Err(err) => assert!(matches!( - *err.as_validation().unwrap(), - BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: 2_500_000, - block_available_gas: 1_500_000, - } - )), - } - } - - #[test] - fn test_balance_increment_not_duplicated() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .cancun_activated() - .prague_activated() - .build(), - ); - - let withdrawal_recipient = 
address!("0x1000000000000000000000000000000000000000"); - - let mut db = CacheDB::new(EmptyDB::default()); - let initial_balance = 100; - db.insert_account_info( - withdrawal_recipient, - AccountInfo { balance: U256::from(initial_balance), nonce: 1, ..Default::default() }, - ); - - let withdrawal = - Withdrawal { index: 0, validator_index: 0, address: withdrawal_recipient, amount: 1 }; - - let header = Header { - timestamp: 1, - number: 1, - excess_blob_gas: Some(0), - parent_beacon_block_root: Some(B256::random()), - ..Header::default() - }; - - let block = &RecoveredBlock::new_unhashed( - Block { - header, - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: Some(vec![withdrawal].into()), - }, - }, - vec![], - ); - - let provider = evm_config(chain_spec); - let executor = provider.batch_executor(db); - - let (tx, rx) = mpsc::channel(); - let tx_clone = tx.clone(); - - let _output = executor - .execute_with_state_hook(block, move |_, state: &EvmState| { - if let Some(account) = state.get(&withdrawal_recipient) { - let _ = tx_clone.send(account.info.balance); - } - }) - .expect("Block execution should succeed"); - - drop(tx); - let balance_changes: Vec = rx.try_iter().collect(); - - if let Some(final_balance) = balance_changes.last() { - let expected_final_balance = U256::from(initial_balance) + U256::from(1_000_000_000); // initial + 1 Gwei in Wei - assert_eq!( - *final_balance, expected_final_balance, - "Final balance should match expected value after withdrawal" - ); - } - } -} diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 8a133ca9ad2..ad77ae74ea4 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -44,7 +44,15 @@ use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7840::BlobParams}; pub use config::{revm_spec, revm_spec_by_timestamp_and_block_number}; use reth_ethereum_forks::EthereumHardfork; -pub mod execute; +/// Helper type with backwards compatible methods to obtain Ethereum executor +/// providers. +#[doc(hidden)] +pub mod execute { + use crate::EthEvmConfig; + + #[deprecated(note = "Use `EthEvmConfig` instead")] + pub type EthExecutorProvider = EthEvmConfig; +} mod build; pub use build::EthBlockAssembler; diff --git a/crates/ethereum/evm/tests/execute.rs b/crates/ethereum/evm/tests/execute.rs new file mode 100644 index 00000000000..c7f408f3f16 --- /dev/null +++ b/crates/ethereum/evm/tests/execute.rs @@ -0,0 +1,825 @@ +//! Execution tests. 
+ +use alloy_consensus::{constants::ETH_TO_WEI, Header, TxLegacy}; +use alloy_eips::{ + eip2935::{HISTORY_SERVE_WINDOW, HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, + eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, + eip4895::Withdrawal, + eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + eip7685::EMPTY_REQUESTS_HASH, +}; +use alloy_evm::block::BlockValidationError; +use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256, U256}; +use reth_chainspec::{ChainSpecBuilder, EthereumHardfork, ForkCondition, MAINNET}; +use reth_ethereum_primitives::{Block, BlockBody, Transaction}; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_evm_ethereum::EthEvmConfig; +use reth_execution_types::BlockExecutionResult; +use reth_primitives_traits::{ + crypto::secp256k1::public_key_to_address, Block as _, RecoveredBlock, +}; +use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; +use revm::{ + database::{CacheDB, EmptyDB, TransitionState}, + primitives::address, + state::{AccountInfo, Bytecode, EvmState}, + Database, +}; +use std::sync::{mpsc, Arc}; + +fn create_database_with_beacon_root_contract() -> CacheDB<EmptyDB> { + let mut db = CacheDB::new(Default::default()); + + let beacon_root_contract_account = AccountInfo { + balance: U256::ZERO, + code_hash: keccak256(BEACON_ROOTS_CODE.clone()), + nonce: 1, + code: Some(Bytecode::new_raw(BEACON_ROOTS_CODE.clone())), + }; + + db.insert_account_info(BEACON_ROOTS_ADDRESS, beacon_root_contract_account); + + db +} + +fn create_database_with_withdrawal_requests_contract() -> CacheDB<EmptyDB> { + let mut db = CacheDB::new(Default::default()); + + let withdrawal_requests_contract_account = AccountInfo { + nonce: 1, + balance: U256::ZERO, + code_hash: keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), + code: Some(Bytecode::new_raw(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), + }; + + db.insert_account_info( + WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, + withdrawal_requests_contract_account, + ); + + db +} + +#[test] +fn eip_4788_non_genesis_call() { + let mut header = + Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; + + let db = create_database_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + + let mut executor = provider.batch_executor(db); + + // attempt to execute a block without parent beacon block root, expect err + let err = executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { + header: header.clone(), + body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, + }, + vec![], + )) + .expect_err("Executing cancun block without parent beacon block root field should fail"); + + assert!(matches!( + err.as_validation().unwrap(), + BlockValidationError::MissingParentBeaconBlockRoot + )); + + // fix header, set a gas limit + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { + header: header.clone(), + body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, + }, + vec![], + )) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // 
header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH // + // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist") + }); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); +} + +#[test] +fn eip_4788_no_code_cancun() { + // This test ensures that we "silently fail" when cancun is active and there is no code at + // // BEACON_ROOTS_ADDRESS + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = CacheDB::new(EmptyDB::default()); + + // DON'T deploy the contract at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + + // attempt to execute an empty block with parent beacon block root, this should not fail + provider + .batch_executor(db) + .execute_one(&RecoveredBlock::new_unhashed( + Block { + header, + body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, + }, + vec![], + )) + .expect("Executing a block with no transactions while cancun is active should not fail"); +} + +#[test] +fn eip_4788_empty_account_call() { + // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account + // // during the pre-block call + + let mut db = create_database_with_beacon_root_contract(); + + // insert an empty SYSTEM_ADDRESS + db.insert_account_info(SYSTEM_ADDRESS, Default::default()); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + + // construct the header for block one + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let mut executor = provider.batch_executor(db); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { + header, + body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, + }, + vec![], + )) + .expect("Executing a block with no transactions while cancun is active should not fail"); + + // ensure that the nonce of the system address account has not changed + let nonce = + executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); + assert_eq!(nonce, 0); +} + +#[test] +fn eip_4788_genesis_call() { + let db = create_database_with_beacon_root_contract(); + + // activate cancun at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + 
.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut header = chain_spec.genesis_header().clone(); + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + // attempt to execute the genesis block with non-zero parent beacon block root, expect err + header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); + let _err = executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header: header.clone(), body: Default::default() }, + vec![], + )) + .expect_err( + "Executing genesis cancun block with non-zero parent beacon block root field + should fail", + ); + + // fix header + header.parent_beacon_block_root = Some(B256::ZERO); + + // now try to process the genesis block again, this time ensuring that a system contract + // call does not occur + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .unwrap(); + + // there is no system contract call so there should be NO STORAGE CHANGES + // this means we'll check the transition state + let transition_state = executor.with_state_mut(|state| { + state.transition_state.take().expect("the evm should be initialized with bundle updates") + }); + + // assert that it is the default (empty) transition state + assert_eq!(transition_state, TransitionState::default()); +} + +#[test] +fn eip_4788_high_base_fee() { + // This test ensures that if we have a base fee, then we don't return an error when the + // system contract is called, due to the gas price being less than the base fee. + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(B256::with_last_byte(0x69)), + base_fee_per_gas: Some(u64::MAX), + excess_blob_gas: Some(0), + ..Header::default() + }; + + let db = create_database_with_beacon_root_contract(); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + + // execute header + let mut executor = provider.batch_executor(db); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header: header.clone(), body: Default::default() }, + vec![], + )) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH // + // should be parent_beacon_block_root + let history_buffer_length = 8191u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() + }); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); +} + +/// Create a state provider with blockhashes and the EIP-2935 system contract. 
+fn create_database_with_block_hashes(latest_block: u64) -> CacheDB<EmptyDB> { + let mut db = CacheDB::new(Default::default()); + for block_number in 0..=latest_block { + db.cache.block_hashes.insert(U256::from(block_number), keccak256(block_number.to_string())); + } + + let blockhashes_contract_account = AccountInfo { + balance: U256::ZERO, + code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), + code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), + nonce: 1, + }; + + db.insert_account_info(HISTORY_STORAGE_ADDRESS, blockhashes_contract_account); + + db +} +#[test] +fn eip_2935_pre_fork() { + let db = create_database_with_block_hashes(1); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Never) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + // construct the header for block one + let header = Header { timestamp: 1, number: 1, ..Header::default() }; + + // attempt to execute an empty block, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // ensure that the block hash was *not* written to storage, since this is before the fork + // was activated + // + // we load the account first, because revm expects it to be + // loaded + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() + })); +} + +#[test] +fn eip_2935_fork_activation_genesis() { + let db = create_database_with_block_hashes(0); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .prague_activated() + .build(), + ); + + let header = chain_spec.genesis_header().clone(); + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + // attempt to execute genesis block, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // ensure that the block hash was *not* written to storage, since there are no blocks + // preceding genesis + // + // we load the account first, because revm expects it to be + // loaded + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() + })); +} + +#[test] +fn eip_2935_fork_activation_within_window_bounds() { + let fork_activation_block = (HISTORY_SERVE_WINDOW - 10) as u64; + let db = create_database_with_block_hashes(fork_activation_block); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) + .build(), + ); + + let header = Header { + parent_hash: B256::random(), + timestamp: 1, + number: fork_activation_block, + requests_hash: Some(EMPTY_REQUESTS_HASH), + excess_blob_gas: Some(0), + parent_beacon_block_root: Some(B256::random()), + ..Header::default() + }; + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + // attempt to 
execute the fork activation block, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // the hash for the ancestor of the fork activation block should be present + assert!( + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()) + ); + assert_ne!( + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) + .unwrap()), + U256::ZERO + ); + + // the hash of the block itself should not be in storage + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)).unwrap().is_zero() + })); +} + +// +#[test] +fn eip_2935_fork_activation_outside_window_bounds() { + let fork_activation_block = (HISTORY_SERVE_WINDOW + 256) as u64; + let db = create_database_with_block_hashes(fork_activation_block); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) + .build(), + ); + + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + let header = Header { + parent_hash: B256::random(), + timestamp: 1, + number: fork_activation_block, + requests_hash: Some(EMPTY_REQUESTS_HASH), + excess_blob_gas: Some(0), + parent_beacon_block_root: Some(B256::random()), + ..Header::default() + }; + + // attempt to execute the fork activation block, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // the hash for the ancestor of the fork activation block should be present + assert!( + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()) + ); +} + +#[test] +fn eip_2935_state_transition_inside_fork() { + let db = create_database_with_block_hashes(2); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .prague_activated() + .build(), + ); + + let header = chain_spec.genesis_header().clone(); + let header_hash = header.hash_slow(); + + let provider = EthEvmConfig::new(chain_spec); + let mut executor = provider.batch_executor(db); + + // attempt to execute the genesis block, this should not fail + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // nothing should be written as the genesis has no ancestors + // + // we load the account first, because revm expects it to be + // loaded + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero() + })); + + // attempt to execute block 1, this should not fail + let header = Header { + parent_hash: header_hash, + timestamp: 1, + number: 1, + requests_hash: Some(EMPTY_REQUESTS_HASH), + excess_blob_gas: Some(0), + parent_beacon_block_root: Some(B256::random()), + ..Header::default() + }; + let header_hash = header.hash_slow(); + + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: 
Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // the block hash of genesis should now be in storage, but not block 1 + assert!( + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()) + ); + assert_ne!( + executor + .with_state_mut(|state| state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap()), + U256::ZERO + ); + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap().is_zero() + })); + + // attempt to execute block 2, this should not fail + let header = Header { + parent_hash: header_hash, + timestamp: 1, + number: 2, + requests_hash: Some(EMPTY_REQUESTS_HASH), + excess_blob_gas: Some(0), + parent_beacon_block_root: Some(B256::random()), + ..Header::default() + }; + + executor + .execute_one(&RecoveredBlock::new_unhashed( + Block { header, body: Default::default() }, + vec![], + )) + .expect("Executing a block with no transactions while Prague is active should not fail"); + + // the block hash of genesis and block 1 should now be in storage, but not block 2 + assert!( + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()) + ); + assert_ne!( + executor + .with_state_mut(|state| state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap()), + U256::ZERO + ); + assert_ne!( + executor + .with_state_mut(|state| state.storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap()), + U256::ZERO + ); + assert!(executor.with_state_mut(|state| { + state.storage(HISTORY_STORAGE_ADDRESS, U256::from(2)).unwrap().is_zero() + })); +} + +#[test] +fn eip_7002() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .prague_activated() + .build(), + ); + + let mut db = create_database_with_withdrawal_requests_contract(); + + let sender_key_pair = generators::generate_key(&mut generators::rng()); + let sender_address = public_key_to_address(sender_key_pair.public_key()); + + db.insert_account_info( + sender_address, + AccountInfo { nonce: 1, balance: U256::from(ETH_TO_WEI), ..Default::default() }, + ); + + // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 + let validator_public_key = fixed_bytes!( + "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" + ); + let withdrawal_amount = fixed_bytes!("0203040506070809"); + let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); + assert_eq!(input.len(), 56); + + let mut header = chain_spec.genesis_header().clone(); + header.gas_limit = 1_500_000; + // measured + header.gas_used = 135_856; + header.receipts_root = + b256!("0xb31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); + + let tx = sign_tx_with_key_pair( + sender_key_pair, + Transaction::Legacy(TxLegacy { + chain_id: Some(chain_spec.chain.id()), + nonce: 1, + gas_price: header.base_fee_per_gas.unwrap().into(), + gas_limit: header.gas_used, + to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), + // `MIN_WITHDRAWAL_REQUEST_FEE` + value: U256::from(2), + input, + }), + ); + + let provider = EthEvmConfig::new(chain_spec); + + let mut executor = provider.batch_executor(db); + + let BlockExecutionResult { receipts, requests, .. 
} = executor + .execute_one( + &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } + .try_into_recovered() + .unwrap(), + ) + .unwrap(); + + let receipt = receipts.first().unwrap(); + assert!(receipt.success); + + // There should be exactly one entry with withdrawal requests + assert_eq!(requests.len(), 1); + assert_eq!(requests[0][0], 1); +} + +#[test] +fn block_gas_limit_error() { + // Create a chain specification with fork conditions set for Prague + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + // Create a state provider with the withdrawal requests contract pre-deployed + let mut db = create_database_with_withdrawal_requests_contract(); + + // Generate a new key pair for the sender + let sender_key_pair = generators::generate_key(&mut generators::rng()); + // Get the sender's address from the public key + let sender_address = public_key_to_address(sender_key_pair.public_key()); + + // Insert the sender account into the state with a nonce of 1 and a balance of 1 ETH in Wei + db.insert_account_info( + sender_address, + AccountInfo { nonce: 1, balance: U256::from(ETH_TO_WEI), ..Default::default() }, + ); + + // Define the validator public key and withdrawal amount as fixed bytes + let validator_public_key = fixed_bytes!( + "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" + ); + let withdrawal_amount = fixed_bytes!("2222222222222222"); + // Concatenate the validator public key and withdrawal amount into a single byte array + let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); + // Ensure the input length is 56 bytes + assert_eq!(input.len(), 56); + + // Create a genesis block header with a specified gas limit and gas used + let mut header = chain_spec.genesis_header().clone(); + header.gas_limit = 1_500_000; + header.gas_used = 134_807; + header.receipts_root = + b256!("0xb31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); + + // Create a transaction with a gas limit higher than the block gas limit + let tx = sign_tx_with_key_pair( + sender_key_pair, + Transaction::Legacy(TxLegacy { + chain_id: Some(chain_spec.chain.id()), + nonce: 1, + gas_price: header.base_fee_per_gas.unwrap().into(), + gas_limit: 2_500_000, // higher than block gas limit + to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), + value: U256::from(1), + input, + }), + ); + + // Create an executor from the state provider + let evm_config = EthEvmConfig::new(chain_spec); + let mut executor = evm_config.batch_executor(db); + + // Execute the block and capture the result + let exec_result = executor.execute_one( + &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } + .try_into_recovered() + .unwrap(), + ); + + // Check if the execution result is an error and assert the specific error type + match exec_result { + Ok(_) => panic!("Expected block gas limit error"), + Err(err) => assert!(matches!( + *err.as_validation().unwrap(), + BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: 2_500_000, + block_available_gas: 1_500_000, + } + )), + } +} + +#[test] +fn test_balance_increment_not_duplicated() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .cancun_activated() + .prague_activated() + .build(), + ); + + let withdrawal_recipient = 
address!("0x1000000000000000000000000000000000000000"); + + let mut db = CacheDB::new(EmptyDB::default()); + let initial_balance = 100; + db.insert_account_info( + withdrawal_recipient, + AccountInfo { balance: U256::from(initial_balance), nonce: 1, ..Default::default() }, + ); + + let withdrawal = + Withdrawal { index: 0, validator_index: 0, address: withdrawal_recipient, amount: 1 }; + + let header = Header { + timestamp: 1, + number: 1, + excess_blob_gas: Some(0), + parent_beacon_block_root: Some(B256::random()), + ..Header::default() + }; + + let block = &RecoveredBlock::new_unhashed( + Block { + header, + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: Some(vec![withdrawal].into()), + }, + }, + vec![], + ); + + let provider = EthEvmConfig::new(chain_spec); + let executor = provider.batch_executor(db); + + let (tx, rx) = mpsc::channel(); + let tx_clone = tx.clone(); + + let _output = executor + .execute_with_state_hook(block, move |_, state: &EvmState| { + if let Some(account) = state.get(&withdrawal_recipient) { + let _ = tx_clone.send(account.info.balance); + } + }) + .expect("Block execution should succeed"); + + drop(tx); + let balance_changes: Vec<U256> = rx.try_iter().collect(); + + if let Some(final_balance) = balance_changes.last() { + let expected_final_balance = U256::from(initial_balance) + U256::from(1_000_000_000); // initial + 1 Gwei in Wei + assert_eq!( + *final_balance, expected_final_balance, + "Final balance should match expected value after withdrawal" + ); + } +} diff --git a/crates/ethereum/node/src/evm.rs b/crates/ethereum/node/src/evm.rs index 99fd6dfd691..4e8fd99f82b 100644 --- a/crates/ethereum/node/src/evm.rs +++ b/crates/ethereum/node/src/evm.rs @@ -1,6 +1,7 @@ //! Ethereum EVM support #[doc(inline)] +#[allow(deprecated)] pub use reth_evm_ethereum::execute::EthExecutorProvider; #[doc(inline)] pub use reth_evm_ethereum::{EthEvm, EthEvmConfig}; diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 0f6d8ed312d..1d4096f33f6 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -17,11 +17,14 @@ use revm as _; pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::{EthEvmConfig, EthExecutorProvider}; +pub use evm::EthEvmConfig; + +#[allow(deprecated)] +pub use evm::EthExecutorProvider; pub use reth_ethereum_consensus as consensus; pub mod node; -pub use node::{EthereumEthApiBuilder, EthereumNode}; +pub use node::*; pub mod payload; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 177a9c7fabc..e8c9d002eb6 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -3,20 +3,22 @@ pub use crate::{payload::EthereumPayloadBuilder, EthereumEngineValidator}; use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; -use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; -use reth_ethereum_primitives::{EthPrimitives, PooledTransactionVariant, TransactionSigned}; +use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes}; -use reth_network::{EthNetworkPrimitives, 
NetworkHandle, PeersInfo}; -use reth_node_api::{AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, TxTy}; +use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; +use reth_node_api::{ + AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, +}; use reth_node_builder::{ components::{ BasicPayloadServiceBuilder, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, - NetworkBuilder, PoolBuilder, + NetworkBuilder, PoolBuilder, TxPoolBuilder, }, node::{FullNodeTypes, NodeTypes}, rpc::{ @@ -26,7 +28,7 @@ use reth_node_builder::{ BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; -use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, EthStorage}; +use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{eth::core::EthApiFor, ValidationApi}; use reth_rpc_api::{eth::FullEthApiServer, servers::BlockSubmissionValidationApiServer}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -34,8 +36,8 @@ use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::{DiskFileBlobStore, DiskFileBlobStoreConfig}, - EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, + blobstore::DiskFileBlobStore, EthTransactionPool, PoolPooledTx, PoolTransaction, + TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; use revm::context::TxEnv; @@ -195,8 +197,8 @@ where ); self.inner - .launch_add_ons_with(ctx, move |modules, _, _| { - modules.merge_if_module_configured( + .launch_add_ons_with(ctx, move |container| { + container.modules.merge_if_module_configured( RethRpcModule::Flashbots, validation_api.into_rpc(), )?; @@ -275,18 +277,7 @@ impl> DebugNode for EthereumNode { type RpcBlock = alloy_rpc_types_eth::Block; fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_ethereum_primitives::Block { - let alloy_rpc_types_eth::Block { header, transactions, withdrawals, .. 
} = rpc_block; - reth_ethereum_primitives::Block { - header: header.inner, - body: reth_ethereum_primitives::BlockBody { - transactions: transactions - .into_transactions() - .map(|tx| tx.inner.into_inner().into()) - .collect(), - ommers: Default::default(), - withdrawals, - }, - } + rpc_block.into_consensus().convert_transactions() } } @@ -330,11 +321,10 @@ where type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>; async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> { - let data_dir = ctx.config().datadir(); let pool_config = ctx.pool_config(); let blob_cache_size = if let Some(blob_cache_size) = pool_config.blob_cache_size { - blob_cache_size + Some(blob_cache_size) } else { // get the current blob params for the current timestamp, fallback to default Cancun // params @@ -347,13 +337,12 @@ where // Derive the blob cache size from the target blob count, to auto scale it by // multiplying it with the slot count for 2 epochs: 384 for pectra - (blob_params.target_blob_count * EPOCH_SLOTS * 2) as u32 + Some((blob_params.target_blob_count * EPOCH_SLOTS * 2) as u32) }; - let custom_config = - DiskFileBlobStoreConfig::default().with_max_cached_entries(blob_cache_size); + let blob_store = + reth_node_builder::components::create_blob_store_with_cache(ctx, blob_cache_size)?; - let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), custom_config)?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) @@ -362,60 +351,12 @@ where .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); - let transaction_pool = - reth_transaction_pool::Pool::eth_pool(validator, blob_store, pool_config); - info!(target: "reth::cli", "Transaction pool initialized"); + let transaction_pool = TxPoolBuilder::new(ctx) + .with_validator(validator) + .build_and_spawn_maintenance_task(blob_store, pool_config)?; - // spawn txpool maintenance task - { - let pool = transaction_pool.clone(); - let chain_events = ctx.provider().canonical_state_stream(); - let client = ctx.provider().clone(); - // Only spawn backup task if not disabled - if !ctx.config().txpool.disable_transactions_backup { - // Use configured backup path or default to data dir - let transactions_path = ctx - .config() - .txpool - .transactions_backup_path - .clone() - .unwrap_or_else(|| data_dir.txpool_transactions()); - - let transactions_backup_config = - reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path); - - ctx.task_executor().spawn_critical_with_graceful_shutdown_signal( - "local transactions backup task", - |shutdown| { - reth_transaction_pool::maintain::backup_local_transactions_task( - shutdown, - pool.clone(), - transactions_backup_config, - ) - }, - ); - } - - // spawn the maintenance task - ctx.task_executor().spawn_critical( - "txpool maintenance task", - reth_transaction_pool::maintain::maintain_transaction_pool_future( - client, - pool, - chain_events, - ctx.task_executor().clone(), - reth_transaction_pool::maintain::MaintainPoolConfig { - max_tx_lifetime: transaction_pool.config().max_queued_lifetime, - no_local_exemptions: transaction_pool - .config() - .local_transactions_config - .no_exemptions, - ..Default::default() - }, - ), - ); - debug!(target: "reth::cli", "Spawned txpool maintenance task"); - } + info!(target: "reth::cli", "Transaction pool initialized"); + debug!(target: "reth::cli", "Spawned
txpool maintenance task"); Ok(transaction_pool) } @@ -429,16 +370,13 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where - Node: FullNodeTypes>, - Pool: TransactionPool< - Transaction: PoolTransaction< - Consensus = TxTy, - Pooled = PooledTransactionVariant, - >, - > + Unpin + Node: FullNodeTypes>, + Pool: TransactionPool>> + + Unpin + 'static, { - type Network = NetworkHandle; + type Network = + NetworkHandle, PoolPooledTx>>; async fn build_network( self, diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index fc00df9e719..34a42105381 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -66,8 +66,14 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, _) = - setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, _) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut node = nodes.pop().unwrap(); let provider = ProviderBuilder::new().connect_http(node.rpc_url()); @@ -102,8 +108,14 @@ async fn test_long_reorg() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, _) = - setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, _) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut first_node = nodes.pop().unwrap(); let mut second_node = nodes.pop().unwrap(); @@ -152,8 +164,14 @@ async fn test_reorg_through_backfill() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, _) = - setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, _) = setup_engine::( + 2, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut first_node = nodes.pop().unwrap(); let mut second_node = nodes.pop().unwrap(); @@ -167,9 +185,9 @@ async fn test_reorg_through_backfill() -> eyre::Result<()> { let head = first_provider.get_block_by_number(20.into()).await?.unwrap(); second_node.sync_to(head.header.hash).await?; - // Produce an unfinalized fork chain with 5 blocks + // Produce an unfinalized fork chain with 30 blocks second_node.payload.timestamp = head.header.timestamp; - advance_with_random_transactions(&mut second_node, 5, &mut rng, false).await?; + advance_with_random_transactions(&mut second_node, 30, &mut rng, false).await?; // Now reorg second node to the finalized canonical head let head = first_provider.get_block_by_number(100.into()).await?.unwrap(); diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index b8eefea3d85..57462fbfc6d 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -45,8 +45,14 @@ async fn test_fee_history() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, wallet) = - setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, wallet) = setup_engine::( + 1, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut node = nodes.pop().unwrap(); let provider = ProviderBuilder::new() .wallet(EthereumWallet::new(wallet.wallet_gen().swap_remove(0))) @@ -127,8 +133,14 @@ async fn test_flashbots_validate_v3() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, wallet) = - setup_engine::(1, 
chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, wallet) = setup_engine::<EthereumNode>( + 1, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut node = nodes.pop().unwrap(); let provider = ProviderBuilder::new() .wallet(EthereumWallet::new(wallet.wallet_gen().swap_remove(0))) @@ -203,8 +215,14 @@ async fn test_flashbots_validate_v4() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, wallet) = - setup_engine::<EthereumNode>(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let (mut nodes, _tasks, wallet) = setup_engine::<EthereumNode>( + 1, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + ) + .await?; let mut node = nodes.pop().unwrap(); let provider = ProviderBuilder::new() .wallet(EthereumWallet::new(wallet.wallet_gen().swap_remove(0)))
diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs index 22c99209cf5..4d947661f5c 100644 --- a/crates/ethereum/primitives/src/receipt.rs +++ b/crates/ethereum/primitives/src/receipt.rs @@ -285,6 +285,46 @@ impl InMemorySize for Receipt { impl reth_primitives_traits::Receipt for Receipt {} +impl<T> From<alloy_consensus::ReceiptEnvelope<T>> for Receipt +where + T: Into<alloy_primitives::Log>, +{ + fn from(value: alloy_consensus::ReceiptEnvelope<T>) -> Self { + let value = value.into_primitives_receipt(); + Self { + tx_type: value.tx_type(), + success: value.is_success(), + cumulative_gas_used: value.cumulative_gas_used(), + // TODO: remove after + logs: value.logs().to_vec(), + } + } +} + +impl From<Receipt> for alloy_consensus::Receipt { + fn from(value: Receipt) -> Self { + Self { + status: value.success.into(), + cumulative_gas_used: value.cumulative_gas_used, + logs: value.logs, + } + } +} + +impl From<Receipt> for alloy_consensus::ReceiptEnvelope { + fn from(value: Receipt) -> Self { + let tx_type = value.tx_type; + let receipt = value.into_with_bloom().map_receipt(Into::into); + match tx_type { + TxType::Legacy => Self::Legacy(receipt), + TxType::Eip2930 => Self::Eip2930(receipt), + TxType::Eip1559 => Self::Eip1559(receipt), + TxType::Eip4844 => Self::Eip4844(receipt), + TxType::Eip7702 => Self::Eip7702(receipt), + } + } +} + #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec};
diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 95673c82854..f6f45922583 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -35,6 +35,7 @@ reth-exex = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } +reth-cli-util = { workspace = true, optional = true } # reth-ethereum reth-ethereum-primitives.workspace = true @@ -104,7 +105,7 @@ full = [ "network", ] -cli = ["dep:reth-ethereum-cli"] +cli = ["dep:reth-ethereum-cli", "dep:reth-cli-util"] consensus = [ "dep:reth-consensus", "dep:reth-consensus-common",
diff --git a/crates/ethereum/reth/src/lib.rs b/crates/ethereum/reth/src/lib.rs index c6f3b74e335..421cabe9968 100644 --- a/crates/ethereum/reth/src/lib.rs +++ b/crates/ethereum/reth/src/lib.rs @@ -21,7 +21,12 @@ pub mod primitives { /// Re-exported cli types #[cfg(feature = "cli")] -pub use reth_ethereum_cli as cli; +pub mod cli { + #[doc(inline)] + pub use reth_cli_util::*; + #[doc(inline)] + pub use reth_ethereum_cli::*; +} /// Re-exported pool types #[cfg(feature = "pool")]
diff --git
a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index facbe115c78..dc7218631f9 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -242,16 +242,25 @@ impl Chain { N::SignedTx: Encodable2718, { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); - for ((block_num, block), receipts) in - self.blocks().iter().zip(self.execution_outcome.receipts().iter()) - { - let mut tx_receipts = Vec::with_capacity(receipts.len()); - for (tx, receipt) in block.body().transactions().iter().zip(receipts.iter()) { - tx_receipts.push((tx.trie_hash(), receipt.clone())); - } - let block_num_hash = BlockNumHash::new(*block_num, block.hash()); - receipt_attach.push(BlockReceipts { block: block_num_hash, tx_receipts }); - } + + self.blocks_and_receipts().for_each(|(block, receipts)| { + let block_num_hash = BlockNumHash::new(block.number(), block.hash()); + + let tx_receipts = block + .body() + .transactions() + .iter() + .zip(receipts) + .map(|(tx, receipt)| (tx.trie_hash(), receipt.clone())) + .collect(); + + receipt_attach.push(BlockReceipts { + block: block_num_hash, + tx_receipts, + timestamp: block.timestamp(), + }); + }); + receipt_attach } @@ -400,6 +409,8 @@ pub struct BlockReceipts { pub block: BlockNumHash, /// Transaction identifier and receipt. pub tx_receipts: Vec<(TxHash, T)>, + /// Block timestamp + pub timestamp: u64, } /// Bincode-compatible [`Chain`] serde implementation. diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index a98eac62783..8d380199002 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -81,4 +81,5 @@ serde = [ "reth-prune-types/serde", "reth-config/serde", "reth-ethereum-primitives/serde", + "reth-chain-state/serde", ] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 393d00c62ee..bbfd6c2a894 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -247,7 +247,7 @@ mod tests { BackfillJobFactory, }; use reth_db_common::init::init_genesis; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, @@ -264,7 +264,7 @@ mod tests { let chain_spec = chain_spec(address); - let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let executor = EthEvmConfig::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; @@ -300,7 +300,7 @@ mod tests { let chain_spec = chain_spec(address); - let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let executor = EthEvmConfig::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index d9328db1833..2525f804224 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -247,7 +247,7 @@ mod tests { }; use futures::StreamExt; use reth_db_common::init::init_genesis; - use reth_evm_ethereum::execute::EthExecutorProvider; + 
use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, @@ -265,7 +265,7 @@ mod tests { let chain_spec = chain_spec(address); - let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let executor = EthEvmConfig::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; @@ -302,7 +302,7 @@ mod tests { let chain_spec = chain_spec(address); - let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let executor = EthEvmConfig::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 00bfbd94ee4..0485257fa2e 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -9,7 +9,7 @@ use reth_evm::{ execute::{BlockExecutionOutput, Executor}, ConfigureEvm, }; -use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; +use reth_evm_ethereum::EthEvmConfig; use reth_node_api::FullNodePrimitives; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ @@ -68,7 +68,7 @@ where let provider = provider_factory.provider()?; // Execute the block to produce a block execution output - let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) + let mut block_execution_output = EthEvmConfig::ethereum(chain_spec) .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))) .execute(block)?; block_execution_output.state.reverts.sort(); diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 9e839d7cf7d..d5006dd9f19 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -663,7 +663,7 @@ mod tests { use futures::{StreamExt, TryStreamExt}; use rand::Rng; use reth_db_common::init::init_genesis; - use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; + use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::RecoveredBlock; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockReader, @@ -1107,7 +1107,7 @@ mod tests { "test_exex".to_string(), Default::default(), provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), wal.handle(), ); @@ -1162,7 +1162,7 @@ mod tests { "test_exex".to_string(), Default::default(), provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), wal.handle(), ); @@ -1212,7 +1212,7 @@ mod tests { "test_exex".to_string(), Default::default(), provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), wal.handle(), ); @@ -1255,7 +1255,7 @@ mod tests { "test_exex".to_string(), Default::default(), provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), wal.handle(), ); @@ -1315,7 +1315,7 @@ mod tests { "test_exex".to_string(), Default::default(), provider.clone(), - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), wal.handle(), ); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index eac5208f2fb..651bd7d5b29 100644 --- 
a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -453,7 +453,7 @@ mod tests { use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_ethereum_primitives::Block; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::Block as _; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter, @@ -511,7 +511,7 @@ mod tests { let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), notifications_rx, wal.handle(), ) @@ -579,7 +579,7 @@ mod tests { let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), notifications_rx, wal.handle(), ) @@ -618,7 +618,7 @@ mod tests { provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( - BackfillJobFactory::new(EthExecutorProvider::mainnet(), provider.clone()) + BackfillJobFactory::new(EthEvmConfig::mainnet(), provider.clone()) .backfill(node_head.number..=node_head.number) .next() .ok_or_else(|| eyre::eyre!("failed to backfill"))??, @@ -660,7 +660,7 @@ mod tests { let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), notifications_rx, wal.handle(), ) @@ -736,7 +736,7 @@ mod tests { let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, - EthExecutorProvider::mainnet(), + EthEvmConfig::mainnet(), notifications_rx, wal.handle(), ) diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 9fe58fb8690..11dec0246fe 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -42,6 +42,7 @@ serde = [ "rand/serde", "reth-primitives-traits/serde", "reth-ethereum-primitives/serde", + "reth-chain-state/serde", ] serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index e2a93a2a647..ef89e72da57 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -14,7 +14,7 @@ use discv5::{ }; use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; use reth_network_peers::NodeRecord; -use tracing::warn; +use tracing::debug; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; @@ -413,7 +413,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( if let Some(discv5_addr) = discv5_addr_ipv4 { if discv5_addr != rlpx_addr { - warn!(target: "net::discv5", + debug!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" @@ -432,7 +432,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( if let Some(discv5_addr) = discv5_addr_ipv6 { if discv5_addr != rlpx_addr { - warn!(target: "net::discv5", + debug!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index d1026954e90..a0b83d72d44 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -14,7 +14,7 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_primitives_traits::{Block, BlockBody, FullBlock, SealedBlock, 
SealedHeader}; -use std::{collections::HashMap, io, path::Path, sync::Arc}; +use std::{collections::HashMap, io, ops::RangeInclusive, path::Path, sync::Arc}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; @@ -354,10 +354,11 @@ impl BodiesClient for FileClient { type Body = B::Body; type Output = BodiesFut; - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, hashes: Vec<B256>, _priority: Priority, + _range_hint: Option<RangeInclusive<u64>>, ) -> Self::Output { // this just searches the buffer, and fails if it can't find the block let mut bodies = Vec::new();
diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index fed86b989d1..6b0c65a38a9 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -9,6 +9,7 @@ use reth_network_peers::PeerId; use std::{ collections::HashMap, fmt::Debug, + ops::RangeInclusive, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -81,10 +82,11 @@ impl BodiesClient for TestBodiesClient { type Body = BlockBody; type Output = BodiesFut; - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, hashes: Vec<B256>, _priority: Priority, + _range_hint: Option<RangeInclusive<u64>>, ) -> Self::Output { let should_delay = self.should_delay; let bodies = self.bodies.clone();
diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 3865f2910ee..fac61392711 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -9,11 +9,11 @@ use alloy_primitives::{ use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; -use core::mem; +use core::{fmt::Debug, mem}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; use reth_ethereum_primitives::TransactionSigned; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{Block, SignedTransaction}; /// This informs peers of new blocks that have appeared on the network. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -64,6 +64,17 @@ impl From for Vec { } } +/// A trait for block payloads transmitted through p2p. +pub trait NewBlockPayload: + Encodable + Decodable + Clone + Eq + Debug + Send + Sync + Unpin + 'static +{ + /// The block type. + type Block: Block; + + /// Returns a reference to the block. + fn block(&self) -> &Self::Block; +} + /// A new block with the current total difficulty, which includes the difficulty of the returned /// block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] @@ -76,6 +87,14 @@ pub struct NewBlock { pub td: U128, } +impl<B: Block> NewBlockPayload for NewBlock<B> { + type Block = B; + + fn block(&self) -> &Self::Block { + &self.block + } +} + generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); /// This informs peers of transactions that have appeared on the network and are not yet included
diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index d3500a62250..7c47618b5dc 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -4,11 +4,11 @@ //! //! Examples include creating, encoding, and decoding protocol messages. //! -//!
Reference: [Ethereum Wire Protocol](https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol). +//! Reference: [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md). use super::{ broadcast::NewBlockHashes, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, - GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, + GetNodeData, GetPooledTransactions, GetReceipts, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, StatusEth69, Transactions, }; @@ -78,7 +78,7 @@ impl ProtocolMessage { if version.is_eth69() { return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); } - EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)) + EthMessage::NewBlock(Box::new(N::NewBlockPayload::decode(buf)?)) } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), EthMessageID::NewPooledTransactionHashes => { @@ -218,9 +218,9 @@ pub enum EthMessage { /// Represents a `NewBlock` message broadcast to the network. #[cfg_attr( feature = "serde", - serde(bound = "N::Block: serde::Serialize + serde::de::DeserializeOwned") + serde(bound = "N::NewBlockPayload: serde::Serialize + serde::de::DeserializeOwned") )] - NewBlock(Box<NewBlock<N::Block>>), + NewBlock(Box<N::NewBlockPayload>), /// Represents a Transactions message broadcast to the network. #[cfg_attr( feature = "serde", @@ -394,7 +394,7 @@ impl Encodable for EthMessage { #[derive(Clone, Debug, PartialEq, Eq)] pub enum EthBroadcastMessage<N: NetworkPrimitives = EthNetworkPrimitives> { /// Represents a new block broadcast message. - NewBlock(Arc<NewBlock<N::Block>>), + NewBlock(Arc<N::NewBlockPayload>), /// Represents a transactions broadcast message. Transactions(SharedTransactions), }
diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 3a9ba27865e..7fc1000339d 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,9 +1,13 @@ //! Abstraction over primitive types in network messages. +use crate::NewBlockPayload; use alloy_consensus::{RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt}; use alloy_rlp::{Decodable, Encodable}; use core::fmt::Debug; -use reth_primitives_traits::{Block, BlockBody, BlockHeader, NodePrimitives, SignedTransaction}; +use reth_ethereum_primitives::{EthPrimitives, PooledTransactionVariant}; +use reth_primitives_traits::{ + Block, BlockBody, BlockHeader, BlockTy, NodePrimitives, SignedTransaction, +}; /// Abstraction over primitive types which might appear in network messages. See /// [`crate::EthMessage`] for more context. @@ -36,6 +40,9 @@ pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + 'static { + Decodable + Unpin + 'static; + + /// The payload type for the `NewBlock` message. + type NewBlockPayload: NewBlockPayload<Block = Self::Block>; } /// This is a helper trait for use in bounds, where some of the [`NetworkPrimitives`] associated @@ -62,16 +69,27 @@ where { } -/// Network primitive types used by Ethereum networks. +/// Basic implementation of [`NetworkPrimitives`] combining [`NodePrimitives`] and a pooled +/// transaction.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub struct EthNetworkPrimitives; +pub struct BasicNetworkPrimitives<N: NodePrimitives = EthPrimitives, Pooled = PooledTransactionVariant, NewBlock = crate::NewBlock<BlockTy<N>>>( + core::marker::PhantomData<(N, Pooled, NewBlock)>, +); -impl NetworkPrimitives for EthNetworkPrimitives { - type BlockHeader = alloy_consensus::Header; - type BlockBody = reth_ethereum_primitives::BlockBody; - type Block = reth_ethereum_primitives::Block; - type BroadcastedTransaction = reth_ethereum_primitives::TransactionSigned; - type PooledTransaction = reth_ethereum_primitives::PooledTransactionVariant; - type Receipt = reth_ethereum_primitives::Receipt; +impl<N, Pooled, NewBlock> NetworkPrimitives for BasicNetworkPrimitives<N, Pooled, NewBlock> +where + N: NodePrimitives, + Pooled: SignedTransaction + TryFrom<N::SignedTx> + 'static, + NewBlock: NewBlockPayload<Block = N::Block>, +{ + type BlockHeader = N::BlockHeader; + type BlockBody = N::BlockBody; + type Block = N::Block; + type BroadcastedTransaction = N::SignedTx; + type PooledTransaction = Pooled; + type Receipt = N::Receipt; + type NewBlockPayload = NewBlock; } + +/// Network primitive types used by Ethereum networks. +pub type EthNetworkPrimitives = BasicNetworkPrimitives;
diff --git a/crates/net/network-types/src/lib.rs b/crates/net/network-types/src/lib.rs index 1e8ad581d28..8bbf8182d1d 100644 --- a/crates/net/network-types/src/lib.rs +++ b/crates/net/network-types/src/lib.rs @@ -25,7 +25,10 @@ pub use backoff::BackoffKind; pub use peers::{ addr::PeerAddr, kind::PeerKind, - reputation::{is_banned_reputation, ReputationChangeOutcome, DEFAULT_REPUTATION}, + reputation::{ + is_banned_reputation, is_connection_failed_reputation, ReputationChangeOutcome, + DEFAULT_REPUTATION, + }, state::PeerConnectionState, ConnectionsConfig, Peer, PeersConfig, };
diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs index 221705c3846..1fe685b0e81 100644 --- a/crates/net/network-types/src/peers/config.rs +++ b/crates/net/network-types/src/peers/config.rs @@ -131,6 +131,9 @@ pub struct PeersConfig { /// Connect to or accept from trusted nodes only? #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] pub trusted_nodes_only: bool, + /// Interval to update trusted nodes DNS resolution + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub trusted_nodes_resolution_interval: Duration, /// Maximum number of backoff attempts before we give up on a peer and dropping. /// /// The max time spent of a peer before it's removed from the set is determined by the @@ -177,6 +180,7 @@ impl Default for PeersConfig { backoff_durations: Default::default(), trusted_nodes: Default::default(), trusted_nodes_only: false, + trusted_nodes_resolution_interval: Duration::from_secs(60 * 60), basic_nodes: Default::default(), max_backoff_count: 5, incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION,
diff --git a/crates/net/network-types/src/peers/reputation.rs b/crates/net/network-types/src/peers/reputation.rs index 91035d8d45a..cf4b555b23c 100644 --- a/crates/net/network-types/src/peers/reputation.rs +++ b/crates/net/network-types/src/peers/reputation.rs @@ -13,7 +13,7 @@ pub const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT; const REMOTE_DISCONNECT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT; /// The reputation change to apply to a peer that we failed to connect to. -const FAILED_TO_CONNECT_REPUTATION_CHANGE: i32 = 25 * REPUTATION_UNIT; +pub const FAILED_TO_CONNECT_REPUTATION_CHANGE: i32 = 25 * REPUTATION_UNIT; /// The reputation change to apply to a peer that failed to respond in time.
const TIMEOUT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT; @@ -48,6 +48,13 @@ pub const fn is_banned_reputation(reputation: i32) -> bool { reputation < BANNED_REPUTATION } +/// Returns `true` if the given reputation is below the [`FAILED_TO_CONNECT_REPUTATION_CHANGE`] +/// threshold +#[inline] +pub const fn is_connection_failed_reputation(reputation: i32) -> bool { + reputation < FAILED_TO_CONNECT_REPUTATION_CHANGE +} + /// The type that tracks the reputation score. pub type Reputation = i32;
diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 2aa2e627739..167fe4f26da 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -124,6 +124,7 @@ serde = [ "reth-ethereum-primitives/serde", "reth-network-api/serde", "rand_08/serde", + "reth-storage-api/serde", ] test-utils = [ "reth-transaction-pool/test-utils",
diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 65775342a26..3f36b1bdc80 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,8 +1,14 @@ //! Builder support for configuring the entire setup. +use std::fmt::Debug; + use crate::{ eth_requests::EthRequestHandler, - transactions::{TransactionPropagationPolicy, TransactionsManager, TransactionsManagerConfig}, + transactions::{ + config::{StrictEthAnnouncementFilter, TransactionPropagationKind}, + policy::NetworkPolicies, + TransactionPropagationPolicy, TransactionsManager, TransactionsManagerConfig, + }, NetworkHandle, NetworkManager, }; use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; @@ -71,27 +77,49 @@ impl NetworkBuilder { self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, - ) -> NetworkBuilder<TransactionsManager<Pool, N>, Eth, N> { - self.transactions_with_policy(pool, transactions_manager_config, Default::default()) + ) -> NetworkBuilder< + TransactionsManager< + Pool, + N, + NetworkPolicies<TransactionPropagationKind, StrictEthAnnouncementFilter>, + >, + Eth, + N, + > { + self.transactions_with_policy( + pool, + transactions_manager_config, + TransactionPropagationKind::default(), + ) } /// Creates a new [`TransactionsManager`] and wires it to the network. - pub fn transactions_with_policy<Pool: TransactionPool, P: TransactionPropagationPolicy>( + pub fn transactions_with_policy< + Pool: TransactionPool, + P: TransactionPropagationPolicy + Debug, + >( self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, propagation_policy: P, - ) -> NetworkBuilder<TransactionsManager<Pool, N, P>, Eth, N> { + ) -> NetworkBuilder< + TransactionsManager<Pool, N, NetworkPolicies<P, StrictEthAnnouncementFilter>>, + Eth, + N, + > { let Self { mut network, request_handler, .. } = self; let (tx, rx) = mpsc::unbounded_channel(); network.set_transactions(tx); let handle = network.handle().clone(); + let announcement_policy = StrictEthAnnouncementFilter::default(); + let policies = NetworkPolicies::new(propagation_policy, announcement_policy); + let transactions = TransactionsManager::with_policy( handle, pool, rx, transactions_manager_config, - propagation_policy, + policies, ); NetworkBuilder { network, request_handler, transactions } }
diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 3355f76ca47..aee5dce37fb 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -69,7 +69,7 @@ pub struct NetworkConfig { /// first hardfork, `Frontier` for mainnet. pub fork_filter: ForkFilter, /// The block importer type. - pub block_import: Box<dyn BlockImport<N::Block>>, + pub block_import: Box<dyn BlockImport<N::NewBlockPayload>>, /// The default mode of the network. pub network_mode: NetworkMode, /// The executor to use for spawning tasks.
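For context on the `is_connection_failed_reputation` helper added above: a peer only counts as "connection failed" once its reputation drops strictly below a single `FAILED_TO_CONNECT` penalty. A minimal sketch, assuming reputation changes are applied additively and that `REPUTATION_UNIT` is negative (as the `<` comparisons above imply); the `peers::reputation` re-export path for the constant is an assumption:

    use reth_network_types::{
        is_connection_failed_reputation,
        peers::reputation::FAILED_TO_CONNECT_REPUTATION_CHANGE,
    };

    fn main() {
        // One failed connect lands exactly on the (negative) threshold; the
        // strict `<` comparison does not yet count it as "connection failed".
        let after_one_failure = FAILED_TO_CONNECT_REPUTATION_CHANGE;
        assert!(!is_connection_failed_reputation(after_one_failure));

        // A second failure pushes the score strictly below the threshold, which
        // is what lets `PeersManager` trigger an immediate trusted-peer
        // re-resolution (see the peers.rs hunk later in this diff).
        let after_two_failures = 2 * FAILED_TO_CONNECT_REPUTATION_CHANGE;
        assert!(is_connection_failed_reputation(after_two_failures));
    }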
@@ -212,7 +212,7 @@ pub struct NetworkConfigBuilder { /// Whether tx gossip is disabled tx_gossip_disabled: bool, /// The block importer type - block_import: Option<Box<dyn BlockImport<N::Block>>>, + block_import: Option<Box<dyn BlockImport<N::NewBlockPayload>>>, /// How to instantiate transactions manager. transactions_manager_config: TransactionsManagerConfig, /// The NAT resolver for external IP @@ -542,7 +542,7 @@ impl NetworkConfigBuilder { } /// Sets the block import type. - pub fn block_import(mut self, block_import: Box<dyn BlockImport<N::Block>>) -> Self { + pub fn block_import(mut self, block_import: Box<dyn BlockImport<N::NewBlockPayload>>) -> Self { self.block_import = Some(block_import); self }
diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index f88d8bd8158..96ba2ff85ec 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -53,8 +53,8 @@ pub enum NetworkError { error: io::Error, }, /// IO error when creating the discovery service - #[error("failed to launch discovery service: {0}")] - Discovery(io::Error), + #[error("failed to launch discovery service on {0}: {1}")] + Discovery(SocketAddr, io::Error), /// An error occurred with discovery v5 node. #[error("discv5 error, {0}")] Discv5Error(#[from] reth_discv5::Error), @@ -71,8 +71,8 @@ impl NetworkError { match err.kind() { ErrorKind::AddrInUse => Self::AddressAlreadyInUse { kind, error: err }, _ => { - if let ServiceKind::Discovery(_) = kind { - return Self::Discovery(err) + if let ServiceKind::Discovery(address) = kind { - return Self::Discovery(address, err) } Self::Io(err) }
diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index c043692d26d..fdfd051a8a6 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -15,9 +15,12 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use std::{ + ops::RangeInclusive, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -80,15 +83,16 @@ impl BodiesClient for FetchClient { type Output = BodiesFut; /// Sends a `GetBlockBodies` request to an available peer.
- fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, request: Vec<B256>, priority: Priority, + range_hint: Option<RangeInclusive<u64>>, ) -> Self::Output { let (response, rx) = oneshot::channel(); if self .request_tx - .send(DownloadRequest::GetBlockBodies { request, response, priority }) + .send(DownloadRequest::GetBlockBodies { request, response, priority, range_hint }) .is_ok() { Box::pin(FlattenedResponse::from(rx))
diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 7c60d633eff..794c184e69d 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -4,7 +4,7 @@ mod client; pub use client::FetchClient; -use crate::{message::BlockRequest, transform::header::HeaderTransform}; +use crate::{message::BlockRequest, session::BlockRangeInfo, transform::header::HeaderTransform}; use alloy_primitives::B256; use futures::StreamExt; use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; @@ -18,6 +18,7 @@ use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; use std::{ collections::{HashMap, VecDeque}, + ops::RangeInclusive, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -87,6 +88,7 @@ impl StateFetcher { best_hash: B256, best_number: u64, timeout: Arc<AtomicU64>, + range_info: Option<BlockRangeInfo>, ) { self.peers.insert( peer_id, @@ -96,6 +98,7 @@ impl StateFetcher { best_number, timeout, last_response_likely_bad: false, + range_info, }, ); } @@ -356,6 +359,9 @@ struct Peer { /// downloaded), but we still want to avoid requesting from the same peer again if it has the /// lowest timeout. last_response_likely_bad: bool, + /// Tracks the range info for the peer. + #[allow(dead_code)] + range_info: Option<BlockRangeInfo>, } impl Peer { @@ -423,6 +429,8 @@ pub(crate) enum DownloadRequest { request: Vec<B256>, response: oneshot::Sender<PeerRequestResult<Vec<N::BlockBody>>>, priority: Priority, + #[allow(dead_code)] + range_hint: Option<RangeInclusive<u64>>, }, } @@ -498,6 +506,7 @@ mod tests { request: vec![], response: tx, priority: Priority::default(), + range_hint: None, }); assert!(fetcher.poll(cx).is_pending()); @@ -517,8 +526,8 @@ // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1))); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1))); + fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1)), None); + fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1)), None); let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); @@ -548,9 +557,9 @@ let peer2_timeout = Arc::new(AtomicU64::new(300)); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30))); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout)); - fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50))); + fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30)), None); + fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout), None); + fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50)), None); // Must always get peer1 (lowest timeout) assert_eq!(fetcher.next_best_peer(), Some(peer1)); @@ -625,6 +634,7 @@ mod tests { Default::default(), Default::default(), Default::default(), + None, ); let (req, header) = request_pair();
diff --git a/crates/net/network/src/import.rs
b/crates/net/network/src/import.rs index 491fabba9a6..52187e5b2f1 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,6 +1,7 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. use crate::message::NewBlockMessage; +use reth_eth_wire::NewBlock; use reth_eth_wire_types::broadcast::NewBlockHashes; use reth_network_peers::PeerId; use std::{ @@ -9,7 +10,7 @@ use std::{ }; /// Abstraction over block import. -pub trait BlockImport<B = reth_ethereum_primitives::Block>: std::fmt::Debug + Send + Sync { +pub trait BlockImport<B = NewBlock>: std::fmt::Debug + Send + Sync { /// Invoked for a received block announcement from the peer. /// /// For a `NewBlock` message: @@ -27,7 +28,7 @@ pub trait BlockImport: std::fmt::Debug + Se /// Represents different types of block announcement events from the network. #[derive(Debug, Clone)] -pub enum NewBlockEvent<B = reth_ethereum_primitives::Block> { +pub enum NewBlockEvent<B = NewBlock> { /// A new full block announcement Block(NewBlockMessage<B>), /// Only the hashes of new blocks
diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index a20a11e4d0b..1d8e4d15be3 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -169,7 +169,6 @@ pub use manager::NetworkManager; pub use metrics::TxTypesCounter; pub use network::{NetworkHandle, NetworkProtocols}; pub use swarm::NetworkConnectionState; -pub use transactions::{FilterAnnouncement, MessageFilter}; /// re-export p2p interfaces pub use reth_network_p2p as p2p;
diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index f511f05b50b..d2ce957614e 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -37,7 +37,9 @@ use crate::{ }; use futures::{Future, StreamExt}; use parking_lot::Mutex; +use reth_chainspec::EnrForkIdEntry; use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, NetworkPrimitives}; +use reth_eth_wire_types::NewBlockPayload; use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_network_api::{ @@ -270,7 +272,9 @@ impl NetworkManager { if let Some(disc_config) = discovery_v4_config.as_mut() { // merge configured boot nodes disc_config.bootstrap_nodes.extend(resolved_boot_nodes.clone()); - disc_config.add_eip868_pair("eth", status.forkid); + // add the forkid entry for EIP-868, but wrap it in an `EnrForkIdEntry` for proper + // encoding + disc_config.add_eip868_pair("eth", EnrForkIdEntry::from(status.forkid)); } if let Some(discv5) = discovery_v5_config.as_mut() { @@ -524,7 +528,7 @@ impl NetworkManager { #[allow(dead_code)] /// Invoked after a `NewBlock` message from the peer was validated - fn on_block_import_result(&mut self, event: BlockImportEvent<N::Block>) { + fn on_block_import_result(&mut self, event: BlockImportEvent<N::NewBlockPayload>) { match event { BlockImportEvent::Announcement(validation) => match validation { BlockValidation::ValidHeader { block } => { @@ -597,7 +601,8 @@ impl NetworkManager { this.swarm.state_mut().on_new_block(peer_id, block.hash); let block = Arc::unwrap_or_clone(block.block); // start block import process - this.block_import.notify(NewBlockWithPeer { peer_id, block: block.block }); + this.block_import + .notify(NewBlockWithPeer { peer_id, block: block.block().clone() }); }); } PeerMessage::PooledTransactions(msg) => {
diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 1343c2e566f..f1dd603fd22 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -10,12 +10,13 @@ use
futures::FutureExt; use reth_eth_wire::{ message::RequestPair, BlockBodies, BlockHeaders, BlockRangeUpdate, EthMessage, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock, - NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, - SharedTransactions, Transactions, + NewBlockHashes, NewBlockPayload, NewPooledTransactionHashes, NodeData, PooledTransactions, + Receipts, SharedTransactions, Transactions, }; use reth_eth_wire_types::RawCapabilityMessage; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; +use reth_primitives_traits::Block; use std::{ sync::Arc, task::{ready, Context, Poll}, }; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message #[derive(Debug, Clone)] -pub struct NewBlockMessage<B = reth_ethereum_primitives::Block> { +pub struct NewBlockMessage<P = NewBlock> { /// Hash of the block pub hash: B256, /// Raw received message - pub block: Arc<NewBlock<B>>, + pub block: Arc<P>, } // === impl NewBlockMessage === -impl<B: reth_primitives_traits::Block> NewBlockMessage<B> { +impl<P: NewBlockPayload> NewBlockMessage<P> { /// Returns the block number of the block pub fn number(&self) -> u64 { - self.block.block.header().number() + self.block.block().header().number() } } @@ -47,7 +48,7 @@ pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(NewBlockMessage<N::Block>), + NewBlock(NewBlockMessage<N::NewBlockPayload>), /// Received transactions _from_ the peer ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer.
diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 02caa3a26bf..ffe8bf1531f 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -9,7 +9,7 @@ use parking_lot::Mutex; use reth_discv4::{Discv4, NatResolver}; use reth_discv5::Discv5; use reth_eth_wire::{ - BlockRangeUpdate, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlock, + BlockRangeUpdate, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewPooledTransactionHashes, SharedTransactions, }; use reth_ethereum_forks::Head; @@ -120,7 +120,7 @@ impl NetworkHandle { /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p. /// Instead they are sent to the node by CL and can be requested over devp2p. /// Broadcasting new blocks is considered a protocol violation. - pub fn announce_block(&self, block: NewBlock<N::Block>, hash: B256) { + pub fn announce_block(&self, block: N::NewBlockPayload, hash: B256) { self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash)) } @@ -502,7 +502,7 @@ pub(crate) enum NetworkHandleMessage /// Broadcasts an event to announce a new block to all nodes. - AnnounceBlock(NewBlock<N::Block>, B256), + AnnounceBlock(N::NewBlockPayload, B256), /// Sends a list of transactions to the given peer.
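To illustrate the new abstraction: any RLP-codable type that can hand out a reference to its block can now travel as a `NewBlock` payload. A minimal sketch of a custom payload (a hypothetical type, not part of this change; TD-less payloads in the OP-stack style are the motivating case):

    use alloy_rlp::{RlpDecodable, RlpEncodable};
    use reth_eth_wire_types::NewBlockPayload;

    /// Hypothetical payload that carries only the block, with no
    /// total-difficulty field.
    #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
    struct BareNewBlock {
        block: reth_ethereum_primitives::Block,
    }

    impl NewBlockPayload for BareNewBlock {
        type Block = reth_ethereum_primitives::Block;

        // The network stack only needs read access to the block, e.g. for
        // `msg.block().header().number()` in `NewBlockMessage::number`.
        fn block(&self) -> &Self::Block {
            &self.block
        }
    }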
diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index af719e3e76f..bb69d1adc76 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -14,6 +14,7 @@ use reth_net_banlist::BanList; use reth_network_api::test_utils::{PeerCommand, PeersHandle}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{ + is_connection_failed_reputation, peers::{ config::PeerBackoffDurations, reputation::{DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE}, @@ -103,6 +104,7 @@ impl PeersManager { backoff_durations, trusted_nodes, trusted_nodes_only, + trusted_nodes_resolution_interval, basic_nodes, max_backoff_count, incoming_ip_throttle_duration, @@ -141,7 +143,7 @@ impl PeersManager { trusted_peer_ids, trusted_peers_resolver: TrustedPeersResolver::new( trusted_nodes, - tokio::time::interval(Duration::from_secs(60 * 60)), // 1 hour + tokio::time::interval(trusted_nodes_resolution_interval), // configurable, defaults to 1 hour ), manager_tx, handle_rx: UnboundedReceiverStream::new(handle_rx), @@ -582,6 +584,12 @@ impl PeersManager { // we already have an active connection to the peer, so we can ignore this error return } + + if peer.is_trusted() && is_connection_failed_reputation(peer.reputation) { + // trigger resolution task for trusted peer since multiple connection failures + // occurred + self.trusted_peers_resolver.interval.reset_immediately(); + } } self.on_connection_failure(remote_addr, peer_id, err, ReputationChangeKind::FailedToConnect)
diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index a454cf8fb1d..19f57f0f249 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -16,7 +16,7 @@ use crate::{ session::{ conn::EthRlpxConnection, handle::{ActiveSessionMessage, SessionCommand}, - SessionId, + BlockRangeInfo, SessionId, }, }; use alloy_primitives::Sealable; @@ -25,7 +25,7 @@ use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, RequestPair}, - Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, + Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, NewBlockPayload, }; use reth_eth_wire_types::RawCapabilityMessage; use reth_metrics::common::mpsc::MeteredPollSender; @@ -114,6 +114,8 @@ pub(crate) struct ActiveSession { /// Used to reserve a slot to guarantee that the termination message is delivered pub(crate) terminate_message: Option<(PollSender<ActiveSessionMessage<N>>, ActiveSessionMessage<N>)>, + /// The eth69 range info for the remote peer. + pub(crate) range_info: Option<BlockRangeInfo>, } impl ActiveSession { @@ -201,8 +203,10 @@ impl ActiveSession { self.try_emit_broadcast(PeerMessage::NewBlockHashes(msg)).into() } EthMessage::NewBlock(msg) => { - let block = - NewBlockMessage { hash: msg.block.header().hash_slow(), block: Arc::new(*msg) }; + let block = NewBlockMessage { + hash: msg.block().header().hash_slow(), + block: Arc::new(*msg), + }; self.try_emit_broadcast(PeerMessage::NewBlock(block)).into() } EthMessage::Transactions(msg) => { @@ -260,7 +264,11 @@ impl ActiveSession { on_response!(resp, GetReceipts) } EthMessage::BlockRangeUpdate(msg) => { - self.try_emit_broadcast(PeerMessage::BlockRangeUpdated(msg)).into() + if let Some(range_info) = self.range_info.as_ref() { + range_info.update(msg.earliest, msg.latest, msg.latest_hash); + } + + OnIncomingMessageOutcome::Ok } EthMessage::Other(bytes) => self.try_emit_broadcast(PeerMessage::Other(bytes)).into(), } @@ -985,6 +993,7 @@ mod tests { )), protocol_breach_request_timeout: PROTOCOL_BREACH_REQUEST_TIMEOUT, terminate_message: None, + range_info: None, } } ev => {
diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 1cc62a63585..1b73b87f8fd 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -4,24 +4,8 @@ mod active; mod conn; mod counter; mod handle; - -use active::QueuedOutgoingMessages; -pub use conn::EthRlpxConnection; -pub use handle::{ - ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, - SessionCommand, -}; - -pub use reth_network_api::{Direction, PeerInfo}; - -use std::{ - collections::HashMap, - future::Future, - net::SocketAddr, - sync::{atomic::AtomicU64, Arc}, - task::{Context, Poll}, - time::{Duration, Instant}, -}; +mod types; +pub use types::BlockRangeInfo; use crate::{ message::PeerMessage, @@ -29,6 +13,7 @@ use crate::{ protocol::{IntoRlpxSubProtocol, OnNotSupported, RlpxSubProtocolHandlers, RlpxSubProtocols}, session::active::ActiveSession, }; +use active::QueuedOutgoingMessages; use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; @@ -46,6 +31,14 @@ use reth_network_types::SessionsConfig; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; use secp256k1::SecretKey; +use std::{ + collections::HashMap, + future::Future, + net::SocketAddr, + sync::{atomic::AtomicU64, Arc}, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::{ io::{AsyncRead, AsyncWrite}, net::TcpStream, @@ -55,6 +48,13 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; +pub use conn::EthRlpxConnection; +pub use handle::{ + ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, + SessionCommand, +}; +pub use reth_network_api::{Direction, PeerInfo}; + /// Internal identifier for active sessions.
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] pub struct SessionId(usize); @@ -543,6 +543,7 @@ impl SessionManager { internal_request_timeout: Arc::clone(&timeout), protocol_breach_request_timeout: self.protocol_breach_request_timeout, terminate_message: None, + range_info: None, }; self.spawn(session); @@ -579,6 +580,7 @@ impl SessionManager { messages, direction, timeout, + range_info: None, }) } PendingSessionEvent::Disconnected { remote_addr, session_id, direction, error } => { @@ -701,6 +703,8 @@ pub enum SessionEvent { /// The maximum time that the session waits for a response from the peer before timing out /// the connection timeout: Arc<AtomicU64>, + /// The range info for the peer. + range_info: Option<BlockRangeInfo>, }, /// The peer was already connected with another session. AlreadyConnected {
diff --git a/crates/net/network/src/session/types.rs b/crates/net/network/src/session/types.rs new file mode 100644 index 00000000000..c8cd98c3cbc --- /dev/null +++ b/crates/net/network/src/session/types.rs @@ -0,0 +1,79 @@ +//! Shared types for network sessions. + +use alloy_primitives::B256; +use parking_lot::RwLock; +use std::{ + ops::RangeInclusive, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; + +/// Information about the range of blocks available from a peer. +/// +/// This represents the announced `eth69` +/// [`BlockRangeUpdate`](reth_eth_wire_types::BlockRangeUpdate) of a peer. +#[derive(Debug, Clone)] +pub struct BlockRangeInfo { + /// The inner range information. + inner: Arc<BlockRangeInfoInner>, +} + +impl BlockRangeInfo { + /// Creates a new range information. + pub fn new(earliest: u64, latest: u64, latest_hash: B256) -> Self { + Self { + inner: Arc::new(BlockRangeInfoInner { + earliest: AtomicU64::new(earliest), + latest: AtomicU64::new(latest), + latest_hash: RwLock::new(latest_hash), + }), + } + } + + /// Returns true if the block number is within the range of blocks available from the peer. + pub fn contains(&self, block_number: u64) -> bool { + self.range().contains(&block_number) + } + + /// Returns the range of blocks available from the peer. + pub fn range(&self) -> RangeInclusive<u64> { + let earliest = self.earliest(); + let latest = self.latest(); + RangeInclusive::new(earliest, latest) + } + + /// Returns the earliest block number available from the peer. + pub fn earliest(&self) -> u64 { + self.inner.earliest.load(Ordering::Relaxed) + } + + /// Returns the latest block number available from the peer. + pub fn latest(&self) -> u64 { + self.inner.latest.load(Ordering::Relaxed) + } + + /// Returns the latest block hash available from the peer. + pub fn latest_hash(&self) -> B256 { + *self.inner.latest_hash.read() + } + + /// Updates the range information. + pub fn update(&self, earliest: u64, latest: u64, latest_hash: B256) { + self.inner.earliest.store(earliest, Ordering::Relaxed); + self.inner.latest.store(latest, Ordering::Relaxed); + *self.inner.latest_hash.write() = latest_hash; + } +} + +/// Inner structure containing the range information with atomic and thread-safe fields. +#[derive(Debug)] +pub(crate) struct BlockRangeInfoInner { + /// The earliest block which is available. + earliest: AtomicU64, + /// The latest block which is available. + latest: AtomicU64, + /// Latest available block's hash.
+ latest_hash: RwLock, +} diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 074023755e9..89ad4874cc2 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -6,6 +6,7 @@ use crate::{ fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, peers::{PeerAction, PeersManager}, + session::BlockRangeInfo, transform::header::HeaderTransform, FetchClient, }; @@ -14,7 +15,7 @@ use alloy_primitives::B256; use rand::seq::SliceRandom; use reth_eth_wire::{ BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, - NewBlockHashes, UnifiedStatus, + NewBlockHashes, NewBlockPayload, UnifiedStatus, }; use reth_ethereum_forks::ForkId; use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; @@ -152,13 +153,20 @@ impl NetworkState { status: Arc, request_tx: PeerRequestSender>, timeout: Arc, + range_info: Option, ) { debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible"); // find the corresponding block number let block_number = self.client.block_number(status.blockhash).ok().flatten().unwrap_or_default(); - self.state_fetcher.new_active_peer(peer, status.blockhash, block_number, timeout); + self.state_fetcher.new_active_peer( + peer, + status.blockhash, + block_number, + timeout, + range_info, + ); self.active_peers.insert( peer, @@ -188,12 +196,12 @@ impl NetworkState { /// > the total number of peers) using the `NewBlock` message. /// /// See also - pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { + pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { // send a `NewBlock` message to a fraction of the connected peers (square root of the total // number of peers) let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; - let number = msg.block.block.header().number(); + let number = msg.block.block().header().number(); let mut count = 0; // Shuffle to propagate to a random sample of peers on every block announcement @@ -230,8 +238,8 @@ impl NetworkState { /// Completes the block propagation process started in [`NetworkState::announce_new_block()`] /// but sending `NewBlockHash` broadcast to all peers that haven't seen it yet. 
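Reviewer aside on the `BlockRangeInfo` added in `types.rs` above: a minimal sketch of the handle's semantics, using only the API introduced in this diff (`new`, `contains`, `range`, `update`, `latest_hash`). The import path is assumed from the `pub use types::BlockRangeInfo` re-export in `session/mod.rs`, and the values are made up.

```rust
use alloy_primitives::B256;
use reth_network::session::BlockRangeInfo; // path assumed per the re-export above

fn demo() {
    // Peer announces it serves blocks 0..=1000; tip hash zeroed for brevity.
    let info = BlockRangeInfo::new(0, 1000, B256::ZERO);
    assert!(info.contains(999));
    assert!(!info.contains(1001));

    // Clones share the inner `Arc`, so an eth69 `BlockRangeUpdate` applied via
    // the session's handle is immediately visible to other holders, e.g. the fetcher.
    let fetcher_view = info.clone();
    info.update(500, 1500, B256::ZERO);
    assert_eq!(fetcher_view.range(), 500..=1500);
    assert_eq!(fetcher_view.latest_hash(), B256::ZERO);
}
```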
- pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { - let number = msg.block.block.header().number(); + pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { + let number = msg.block.block().header().number(); let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]); for (peer_id, peer) in &mut self.active_peers { if peer.blocks.contains(&msg.hash) { @@ -527,7 +535,7 @@ pub(crate) enum StateAction { /// Target of the message peer_id: PeerId, /// The `NewBlock` message - block: NewBlockMessage, + block: NewBlockMessage, }, NewBlockHashes { /// Target of the message @@ -616,6 +624,7 @@ mod tests { Arc::default(), peer_tx, Arc::new(AtomicU64::new(1)), + None, ); assert!(state.active_peers.contains_key(&peer_id)); diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 7566c285d8b..fbb7b0bf941 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -122,6 +122,7 @@ impl Swarm { messages, direction, timeout, + range_info, } => { self.state.on_session_activated( peer_id, @@ -129,6 +130,7 @@ impl Swarm { status.clone(), messages.clone(), timeout, + range_info, ); Some(SwarmEvent::SessionEstablished { peer_id, diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index aa3ce1b8040..d064ed7eda6 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -6,8 +6,9 @@ use crate::{ eth_requests::EthRequestHandler, protocol::IntoRlpxSubProtocol, transactions::{ - config::TransactionPropagationKind, TransactionsHandle, TransactionsManager, - TransactionsManagerConfig, + config::{StrictEthAnnouncementFilter, TransactionPropagationKind}, + policy::NetworkPolicies, + TransactionsHandle, TransactionsManager, TransactionsManagerConfig, }, NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; @@ -400,7 +401,13 @@ pub struct Peer { #[pin] request_handler: Option>, #[pin] - transactions_manager: Option>, + transactions_manager: Option< + TransactionsManager< + Pool, + EthNetworkPrimitives, + NetworkPolicies, + >, + >, pool: Option, client: C, secret_key: SecretKey, @@ -530,12 +537,15 @@ where let (tx, rx) = unbounded_channel(); network.set_transactions(tx); + let announcement_policy = StrictEthAnnouncementFilter::default(); + let policies = NetworkPolicies::new(policy, announcement_policy); + let transactions_manager = TransactionsManager::with_policy( network.handle().clone(), pool.clone(), rx, config, - policy, + policies, ); Peer { diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index e799d14c766..85ea9e23589 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,4 +1,4 @@ -use std::str::FromStr; +use std::{fmt::Debug, marker::PhantomData, str::FromStr}; use super::{ PeerMetadata, DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, @@ -9,8 +9,10 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use alloy_primitives::B256; use derive_more::{Constructor, Display}; use reth_eth_wire::NetworkPrimitives; +use reth_ethereum_primitives::TxType; /// Configuration for managing transactions within the network. 
#[derive(Debug, Clone)] @@ -149,3 +151,103 @@ impl FromStr for TransactionPropagationKind { } } } + +/// Defines the outcome of evaluating a transaction against an `AnnouncementFilteringPolicy`. +/// +/// Dictates how the `TransactionsManager` should proceed on an announced transaction. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AnnouncementAcceptance { + /// Accept the transaction announcement. + Accept, + /// Log the transaction, but neither fetch it nor penalize the peer. + Ignore, + /// Reject the transaction announcement. + Reject { + /// If true, the peer sending this announcement should be penalized. + penalize_peer: bool, + }, +} + +/// A policy that defines how to handle incoming transaction announcements, +/// particularly concerning transaction types and other announcement metadata. +pub trait AnnouncementFilteringPolicy: Send + Sync + Unpin + 'static { + /// Decides how to handle a transaction announcement based on its type, hash, and size. + fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance; +} + +/// A generic `AnnouncementFilteringPolicy` that enforces strict validation +/// of the transaction type based on a generic type `T`. +#[derive(Debug, Clone)] +pub struct TypedStrictFilter<T: TryFrom<u8> + Debug + Send + Sync + 'static>(PhantomData<T>); + +impl<T: TryFrom<u8> + Debug + Send + Sync + 'static> Default for TypedStrictFilter<T> { + fn default() -> Self { + Self(PhantomData) + } +} + +impl<T> AnnouncementFilteringPolicy for TypedStrictFilter<T> +where + T: TryFrom<u8> + Debug + Send + Sync + Unpin + 'static, + <T as TryFrom<u8>>::Error: Debug, +{ + fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance { + match T::try_from(ty) { + Ok(_valid_type) => AnnouncementAcceptance::Accept, + Err(e) => { + tracing::trace!(target: "net::tx::policy::strict_typed", + type_param = %std::any::type_name::<T>(), + %ty, + %size, + %hash, + error = ?e, + "Invalid or unrecognized transaction type byte. Rejecting entry and recommending peer penalization." + ); + AnnouncementAcceptance::Reject { penalize_peer: true } + } + } + } +} + +/// Type alias for a `TypedStrictFilter` over [`TxType`]. This is the default strict announcement filter. +pub type StrictEthAnnouncementFilter = TypedStrictFilter<TxType>; + +/// An [`AnnouncementFilteringPolicy`] that permissively handles unknown type bytes +/// based on a given type `T` using `T::try_from(u8)`. +/// +/// If `T::try_from(ty)` succeeds, the announcement is accepted. Otherwise, it's ignored. +#[derive(Debug, Clone)] +pub struct TypedRelaxedFilter<T: TryFrom<u8> + Debug + Send + Sync + 'static>(PhantomData<T>); + +impl<T: TryFrom<u8> + Debug + Send + Sync + 'static> Default for TypedRelaxedFilter<T> { + fn default() -> Self { + Self(PhantomData) + } +} + +impl<T> AnnouncementFilteringPolicy for TypedRelaxedFilter<T> +where + T: TryFrom<u8> + Debug + Send + Sync + Unpin + 'static, + <T as TryFrom<u8>>::Error: Debug, +{ + fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance { + match T::try_from(ty) { + Ok(_valid_type) => AnnouncementAcceptance::Accept, + Err(e) => { + tracing::trace!(target: "net::tx::policy::relaxed_typed", + type_param = %std::any::type_name::<T>(), + %ty, + %size, + %hash, + error = ?e, + "Unknown transaction type byte. Ignoring entry." + ); + AnnouncementAcceptance::Ignore + } + } + } +} + +/// Type alias for `TypedRelaxedFilter` over [`TxType`]. This filter accepts known Ethereum transaction types and +/// ignores unknown ones without penalizing the peer.
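To illustrate the extension point this trait opens up, here is a hypothetical downstream policy (not part of this diff) that accepts legacy and EIP-1559 announcements, ignores blob announcements without penalty, and rejects everything else. Only `AnnouncementFilteringPolicy` and `AnnouncementAcceptance` above are real; the struct and the type-byte choices are illustrative.

```rust
use alloy_primitives::B256;

/// Hypothetical filter: fetch only legacy (0x00) and EIP-1559 (0x02)
/// announcements on this (contrived) network.
#[derive(Debug, Clone, Default)]
struct LegacyAnd1559Only;

impl AnnouncementFilteringPolicy for LegacyAnd1559Only {
    fn decide_on_announcement(&self, ty: u8, _hash: &B256, _size: usize) -> AnnouncementAcceptance {
        match ty {
            0x00 | 0x02 => AnnouncementAcceptance::Accept,
            // EIP-4844 announcements are valid elsewhere, so skip them
            // without punishing the peer.
            0x03 => AnnouncementAcceptance::Ignore,
            _ => AnnouncementAcceptance::Reject { penalize_peer: true },
        }
    }
}
```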
+pub type RelaxedEthAnnouncementFilter = TypedRelaxedFilter; diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 43dc1715fb5..c1fdf0e1064 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -28,14 +28,12 @@ use super::{ config::TransactionFetcherConfig, constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, - MessageFilter, PeerMetadata, PooledTransactions, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, + PeerMetadata, PooledTransactions, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }; use crate::{ cache::{LruCache, LruMap}, duration_metered_exec, metrics::TransactionFetcherMetrics, - transactions::{validation, PartiallyFilterMessage}, }; use alloy_consensus::transaction::PooledTransaction; use alloy_primitives::TxHash; @@ -60,7 +58,6 @@ use std::{ }; use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError}; use tracing::trace; -use validation::FilterOutcome; /// The type responsible for fetching missing transactions from peers. /// @@ -85,8 +82,6 @@ pub struct TransactionFetcher { pub hashes_pending_fetch: LruCache, /// Tracks all hashes in the transaction fetcher. pub hashes_fetch_inflight_and_pending_fetch: LruMap, - /// Filter for valid announcement and response data. - pub(super) filter_valid_message: MessageFilter, /// Info on capacity of the transaction fetcher. pub info: TransactionFetcherInfo, #[doc(hidden)] @@ -919,20 +914,19 @@ impl TransactionFetcher { // let unvalidated_payload_len = verified_payload.len(); - let (validation_outcome, valid_payload) = - self.filter_valid_message.partially_filter_valid_entries(verified_payload); + let valid_payload = verified_payload.dedup(); // todo: validate based on announced tx size/type and report peer for sending // invalid response . requires // passing the rlp encoded length down from active session along with the decoded // tx. - if validation_outcome == FilterOutcome::ReportPeer { + if valid_payload.len() != unvalidated_payload_len { trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - unvalidated_payload_len, - valid_payload_len=valid_payload.len(), - "received invalid `PooledTransactions` response from peer, filtered out duplicate entries" + peer_id=format!("{peer_id:#}"), + unvalidated_payload_len, + valid_payload_len=valid_payload.len(), + "received `PooledTransactions` response from peer with duplicate entries, filtered them out" ); } // valid payload will have at least one transaction at this point. even if the tx @@ -1014,7 +1008,6 @@ impl Default for TransactionFetcher { hashes_fetch_inflight_and_pending_fetch: LruMap::new( DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH, ), - filter_valid_message: Default::default(), info: TransactionFetcherInfo::default(), metrics: Default::default(), } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 08683073138..0fdee4a915f 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -6,18 +6,19 @@ pub mod config; pub mod constants; /// Component responsible for fetching transactions from [`NewPooledTransactionHashes`]. pub mod fetcher; -pub mod validation; +/// Defines the [`TransactionPolicies`] trait for aggregating transaction-related policies. 
+pub mod policy; pub use self::constants::{ tx_fetcher::DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }; -use config::TransactionPropagationKind; +use config::{AnnouncementAcceptance, StrictEthAnnouncementFilter, TransactionPropagationKind}; pub use config::{ - TransactionFetcherConfig, TransactionPropagationMode, TransactionPropagationPolicy, - TransactionsManagerConfig, + AnnouncementFilteringPolicy, TransactionFetcherConfig, TransactionPropagationMode, + TransactionPropagationPolicy, TransactionsManagerConfig, }; -pub use validation::*; +use policy::{NetworkPolicies, TransactionPolicies}; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; @@ -30,8 +31,10 @@ use crate::{ }, cache::LruCache, duration_metered_exec, metered_poll_nested_stream_with_budget, - metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - NetworkHandle, + metrics::{ + AnnouncedTxTypesMetrics, TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE, + }, + NetworkHandle, TxTypesCounter, }; use alloy_primitives::{TxHash, B256}; use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; @@ -40,9 +43,9 @@ use reth_eth_wire::{ DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, NetworkPrimitives, NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, - RequestTxHashes, Transactions, + RequestTxHashes, Transactions, ValidAnnouncementData, }; -use reth_ethereum_primitives::TransactionSigned; +use reth_ethereum_primitives::{TransactionSigned, TxType}; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{ events::{PeerEvent, SessionInfo}, @@ -236,12 +239,18 @@ impl TransactionsHandle { /// /// It is directly connected to the [`TransactionPool`] to retrieve requested transactions and /// propagate new transactions over the network. +/// +/// It can be configured with different policies for transaction propagation and announcement +/// filtering. See [`NetworkPolicies`] and [`TransactionPolicies`] for more details. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] pub struct TransactionsManager< Pool, N: NetworkPrimitives = EthNetworkPrimitives, - P: TransactionPropagationPolicy = TransactionPropagationKind, + PBundle: TransactionPolicies = NetworkPolicies< + TransactionPropagationKind, + StrictEthAnnouncementFilter, + >, > { /// Access to the transaction pool. pool: Pool, @@ -298,13 +307,21 @@ pub struct TransactionsManager< transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. config: TransactionsManagerConfig, - /// The policy to use when propagating transactions. - propagation_policy: P, + /// Network Policies + policies: PBundle, /// `TransactionsManager` metrics metrics: TransactionsManagerMetrics, + /// `AnnouncedTxTypes` metrics + announced_tx_types_metrics: AnnouncedTxTypesMetrics, } -impl TransactionsManager { +impl + TransactionsManager< + Pool, + N, + NetworkPolicies, + > +{ /// Sets up a new instance. /// /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance. 
@@ -319,13 +336,13 @@ impl TransactionsManager { pool, from_network, transactions_manager_config, - TransactionPropagationKind::default(), + NetworkPolicies::default(), ) } } -impl - TransactionsManager +impl + TransactionsManager { /// Sets up a new instance with given the settings. /// @@ -335,7 +352,7 @@ impl>, transactions_manager_config: TransactionsManagerConfig, - propagation_policy: P, + policies: PBundle, ) -> Self { let network_events = network.event_listener(); @@ -374,8 +391,9 @@ impl TransactionsManager -where - Pool: TransactionPool, - N: NetworkPrimitives, - Policy: TransactionPropagationPolicy, +impl + TransactionsManager { /// Processes a batch import results. fn on_batch_import_result(&mut self, batch_results: Vec>) { @@ -579,10 +594,15 @@ where } // 1. filter out spam - let (validation_outcome, mut partially_valid_msg) = - self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); + if msg.is_empty() { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + return; + } + + let original_len = msg.len(); + let mut partially_valid_msg = msg.dedup(); - if validation_outcome == FilterOutcome::ReportPeer { + if partially_valid_msg.len() != original_len { self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); } @@ -615,26 +635,67 @@ where // // validates messages with respect to the given network, e.g. allowed tx types // - let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg + let mut should_report_peer = false; + let mut tx_types_counter = TxTypesCounter::default(); + + let is_eth68_message = partially_valid_msg .msg_version() - .expect("partially valid announcement should have version") - .is_eth68() - { - // validate eth68 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_68(partially_valid_msg) - } else { - // validate eth66 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_66(partially_valid_msg) - }; + .expect("partially valid announcement should have a version") + .is_eth68(); + + partially_valid_msg.retain(|tx_hash, metadata_ref_mut| { + let (ty_byte, size_val) = match *metadata_ref_mut { + Some((ty, size)) => { + if !is_eth68_message { + should_report_peer = true; + } + (ty, size) + } + None => { + if is_eth68_message { + should_report_peer = true; + return false; + } + (0u8, 0) + } + }; - if validation_outcome == FilterOutcome::ReportPeer { + if is_eth68_message { + if let Some((actual_ty_byte, _)) = *metadata_ref_mut { + if let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte) { + tx_types_counter.increase_by_tx_type(parsed_tx_type); + } + } + } + + let decision = self + .policies + .announcement_filter() + .decide_on_announcement(ty_byte, tx_hash, size_val); + + match decision { + AnnouncementAcceptance::Accept => true, + AnnouncementAcceptance::Ignore => false, + AnnouncementAcceptance::Reject { penalize_peer } => { + if penalize_peer { + should_report_peer = true; + } + false + } + } + }); + + if is_eth68_message { + self.announced_tx_types_metrics.update_eth68_announcement_metrics(tx_types_counter); + } + + if should_report_peer { self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); } + let mut valid_announcement_data = + ValidAnnouncementData::from_partially_valid_data(partially_valid_msg); + if valid_announcement_data.is_empty() { // no valid announcement data return @@ -732,16 +793,18 @@ where } } -impl TransactionsManager +impl TransactionsManager where - Pool: TransactionPool + 
'static, + Pool: TransactionPool + Unpin + 'static, + N: NetworkPrimitives< - BroadcastedTransaction: SignedTransaction, - PooledTransaction: SignedTransaction, - >, + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + > + Unpin, + + PBundle: TransactionPolicies, Pool::Transaction: PoolTransaction, - Policy: TransactionPropagationPolicy, { /// Invoked when transactions in the local mempool are considered __pending__. /// @@ -927,7 +990,7 @@ where // Note: Assuming ~random~ order due to random state of the peers map hasher for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { - if !self.propagation_policy.can_propagate(peer) { + if !self.policies.propagation_policy().can_propagate(peer) { // skip peers we should not propagate to continue } @@ -1116,7 +1179,7 @@ where Entry::Vacant(entry) => entry.insert(peer), }; - self.propagation_policy.on_session_established(peer); + self.policies.propagation_policy_mut().on_session_established(peer); // Send a `NewPooledTransactionHashes` to the peer with up to // `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` @@ -1155,7 +1218,7 @@ where let peer = self.peers.remove(&peer_id); if let Some(mut peer) = peer { - self.propagation_policy.on_session_closed(&mut peer); + self.policies.propagation_policy_mut().on_session_closed(&mut peer); } self.transaction_fetcher.remove_peer(&peer_id); } @@ -1381,16 +1444,17 @@ where // // spawned in `NodeConfig::start_network`(reth_node_core::NodeConfig) and // `NetworkConfig::start_network`(reth_network::NetworkConfig) -impl Future for TransactionsManager +impl< + Pool: TransactionPool + Unpin + 'static, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + > + Unpin, + PBundle: TransactionPolicies + Unpin, + > Future for TransactionsManager where - Pool: TransactionPool + Unpin + 'static, - N: NetworkPrimitives< - BroadcastedTransaction: SignedTransaction, - PooledTransaction: SignedTransaction, - >, Pool::Transaction: PoolTransaction, - Policy: TransactionPropagationPolicy, { type Output = (); @@ -1993,6 +2057,7 @@ mod tests { transactions::{buffer_hash_to_tx_fetcher, new_mock_session, new_tx_manager}, Testnet, }, + transactions::config::RelaxedEthAnnouncementFilter, NetworkConfigBuilder, NetworkManager, }; use alloy_consensus::{TxEip1559, TxLegacy}; @@ -2008,7 +2073,7 @@ mod tests { }; use reth_storage_api::noop::NoopProvider; use reth_transaction_pool::test_utils::{ - testing_pool, MockTransaction, MockTransactionFactory, + testing_pool, MockTransaction, MockTransactionFactory, TestPool, }; use secp256k1::SecretKey; use std::{ @@ -2760,4 +2825,114 @@ mod tests { let propagated = tx_manager.propagate_transactions(propagate, PropagationMode::Basic); assert!(propagated.0.is_empty()); } + + #[tokio::test] + async fn test_relaxed_filter_ignores_unknown_tx_types() { + reth_tracing::init_test_tracing(); + + let transactions_manager_config = TransactionsManagerConfig::default(); + + let propagation_policy = TransactionPropagationKind::default(); + let announcement_policy = RelaxedEthAnnouncementFilter::default(); + + let policy_bundle = NetworkPolicies::new(propagation_policy, announcement_policy); + + let pool = testing_pool(); + let secret_key = SecretKey::new(&mut rand_08::thread_rng()); + let client = NoopProvider::default(); + + let network_config = NetworkConfigBuilder::new(secret_key) + .listener_port(0) + .disable_discovery() + .build(client.clone()); + + let mut network_manager = 
NetworkManager::new(network_config).await.unwrap(); + let (to_tx_manager_tx, from_network_rx) = + mpsc::unbounded_channel::>(); + network_manager.set_transactions(to_tx_manager_tx); + let network_handle = network_manager.handle().clone(); + let network_service_handle = tokio::spawn(network_manager); + + let mut tx_manager = TransactionsManager::< + TestPool, + EthNetworkPrimitives, + NetworkPolicies, + >::with_policy( + network_handle.clone(), + pool.clone(), + from_network_rx, + transactions_manager_config, + policy_bundle, + ); + + let peer_id = PeerId::random(); + let eth_version = EthVersion::Eth68; + let (mock_peer_metadata, mut mock_session_rx) = new_mock_session(peer_id, eth_version); + tx_manager.peers.insert(peer_id, mock_peer_metadata); + + let mut tx_factory = MockTransactionFactory::default(); + + let valid_known_tx = tx_factory.create_eip1559(); + let known_tx_signed: Arc> = Arc::new(valid_known_tx); + + let known_tx_hash = *known_tx_signed.hash(); + let known_tx_type_byte = known_tx_signed.transaction.tx_type(); + let known_tx_size = known_tx_signed.encoded_length(); + + let unknown_tx_hash = B256::random(); + let unknown_tx_type_byte = 0xff_u8; + let unknown_tx_size = 150; + + let announcement_msg = NewPooledTransactionHashes::Eth68(NewPooledTransactionHashes68 { + types: vec![known_tx_type_byte, unknown_tx_type_byte], + sizes: vec![known_tx_size, unknown_tx_size], + hashes: vec![known_tx_hash, unknown_tx_hash], + }); + + tx_manager.on_new_pooled_transaction_hashes(peer_id, announcement_msg); + + poll_fn(|cx| { + let _ = tx_manager.poll_unpin(cx); + Poll::Ready(()) + }) + .await; + + let mut requested_hashes_in_getpooled = HashSet::new(); + let mut unexpected_request_received = false; + + match tokio::time::timeout(std::time::Duration::from_millis(200), mock_session_rx.recv()) + .await + { + Ok(Some(PeerRequest::GetPooledTransactions { request, response: tx_response_ch })) => { + let GetPooledTransactions(hashes) = request; + for hash in hashes { + requested_hashes_in_getpooled.insert(hash); + } + let _ = tx_response_ch.send(Ok(PooledTransactions(vec![]))); + } + Ok(Some(other_request)) => { + tracing::error!(?other_request, "Received unexpected PeerRequest type"); + unexpected_request_received = true; + } + Ok(None) => tracing::info!("Mock session channel closed or no request received."), + Err(_timeout_err) => { + tracing::info!("Timeout: No GetPooledTransactions request received.") + } + } + + assert!( + requested_hashes_in_getpooled.contains(&known_tx_hash), + "Should have requested the known EIP-1559 transaction. Requested: {requested_hashes_in_getpooled:?}" + ); + assert!( + !requested_hashes_in_getpooled.contains(&unknown_tx_hash), + "Should NOT have requested the unknown transaction type. Requested: {requested_hashes_in_getpooled:?}" + ); + assert!( + !unexpected_request_received, + "An unexpected P2P request was received by the mock peer." + ); + + network_service_handle.abort(); + } } diff --git a/crates/net/network/src/transactions/policy.rs b/crates/net/network/src/transactions/policy.rs new file mode 100644 index 00000000000..c25b9d9b414 --- /dev/null +++ b/crates/net/network/src/transactions/policy.rs @@ -0,0 +1,78 @@ +use crate::transactions::config::{AnnouncementFilteringPolicy, TransactionPropagationPolicy}; +use std::fmt::Debug; + +/// A bundle of policies that control the behavior of network components like +/// the [`TransactionsManager`](super::TransactionsManager). 
+/// +/// This trait allows for different collections of policies to be used interchangeably. +pub trait TransactionPolicies: Send + Sync + Debug + 'static { + /// The type of the policy used for transaction propagation. + type Propagation: TransactionPropagationPolicy; + /// The type of the policy used for filtering transaction announcements. + type Announcement: AnnouncementFilteringPolicy; + + /// Returns a reference to the transaction propagation policy. + fn propagation_policy(&self) -> &Self::Propagation; + + /// Returns a mutable reference to the transaction propagation policy. + fn propagation_policy_mut(&mut self) -> &mut Self::Propagation; + + /// Returns a reference to the announcement filtering policy. + fn announcement_filter(&self) -> &Self::Announcement; +} + +/// A container that bundles specific implementations of transaction-related policies, +/// +/// This struct implements the [`TransactionPolicies`] trait, providing a complete set of +/// policies required by components like the [`TransactionsManager`](super::TransactionsManager). +/// It holds a specific [`TransactionPropagationPolicy`] and an +/// [`AnnouncementFilteringPolicy`]. +#[derive(Debug, Clone, Default)] +pub struct NetworkPolicies { + propagation: P, + announcement: A, +} + +impl NetworkPolicies { + /// Creates a new bundle of network policies. + pub const fn new(propagation: P, announcement: A) -> Self { + Self { propagation, announcement } + } + + /// Returns a new `NetworkPolicies` bundle with the `TransactionPropagationPolicy` replaced. + pub fn with_propagation(self, new_propagation: NewP) -> NetworkPolicies + where + NewP: TransactionPropagationPolicy, + { + NetworkPolicies::new(new_propagation, self.announcement) + } + + /// Returns a new `NetworkPolicies` bundle with the `AnnouncementFilteringPolicy` replaced. + pub fn with_announcement(self, new_announcement: NewA) -> NetworkPolicies + where + NewA: AnnouncementFilteringPolicy, + { + NetworkPolicies::new(self.propagation, new_announcement) + } +} + +impl TransactionPolicies for NetworkPolicies +where + P: TransactionPropagationPolicy + Debug, + A: AnnouncementFilteringPolicy + Debug, +{ + type Propagation = P; + type Announcement = A; + + fn propagation_policy(&self) -> &Self::Propagation { + &self.propagation + } + + fn propagation_policy_mut(&mut self) -> &mut Self::Propagation { + &mut self.propagation + } + + fn announcement_filter(&self) -> &Self::Announcement { + &self.announcement + } +} diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs deleted file mode 100644 index cf91ce69a7e..00000000000 --- a/crates/net/network/src/transactions/validation.rs +++ /dev/null @@ -1,461 +0,0 @@ -//! Validation of [`NewPooledTransactionHashes66`](reth_eth_wire::NewPooledTransactionHashes66) -//! and [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) -//! announcements. Validation and filtering of announcements is network dependent. - -use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; -use alloy_primitives::Signature; -use derive_more::{Deref, DerefMut}; -use reth_eth_wire::{ - DedupPayload, Eth68TxMetadata, HandleMempoolData, PartiallyValidData, ValidAnnouncementData, -}; -use reth_ethereum_primitives::TxType; -use std::{fmt, fmt::Display, mem}; -use tracing::trace; - -/// The size of a decoded signature in bytes. 
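As an aside before the removed `validation.rs` listing: a short sketch of composing the new policy bundle, assuming the module paths introduced in this diff (`transactions::config`, `transactions::policy`) are reachable from `reth_network`.

```rust
use reth_network::transactions::{
    config::{
        RelaxedEthAnnouncementFilter, StrictEthAnnouncementFilter, TransactionPropagationKind,
    },
    policy::NetworkPolicies,
};

fn policies() {
    // Strict bundle: the default pairing used by `TransactionsManager::new`.
    let strict = NetworkPolicies::new(
        TransactionPropagationKind::default(),
        StrictEthAnnouncementFilter::default(),
    );

    // Keep the propagation side, swap only the announcement filter.
    let _relaxed = strict.with_announcement(RelaxedEthAnnouncementFilter::default());
}
```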
-pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::(); - -/// Outcomes from validating a `(ty, hash, size)` entry from a -/// [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68). Signals to the -/// caller how to deal with an announcement entry and the peer who sent the announcement. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum ValidationOutcome { - /// Tells the caller to keep the entry in the announcement for fetch. - Fetch, - /// Tells the caller to filter out the entry from the announcement. - Ignore, - /// Tells the caller to filter out the entry from the announcement and penalize the peer. On - /// this outcome, caller can drop the announcement, that is up to each implementation. - ReportPeer, -} - -/// Generic filter for announcements and responses. Checks for empty message and unique hashes/ -/// transactions in message. -pub trait PartiallyFilterMessage { - /// Removes duplicate entries from a mempool message. Returns [`FilterOutcome::ReportPeer`] if - /// the caller should penalize the peer, otherwise [`FilterOutcome::Ok`]. - fn partially_filter_valid_entries( - &self, - msg: impl DedupPayload + fmt::Debug, - ) -> (FilterOutcome, PartiallyValidData) { - // 1. checks if the announcement is empty - if msg.is_empty() { - trace!(target: "net::tx", - msg=?msg, - "empty payload" - ); - return (FilterOutcome::ReportPeer, PartiallyValidData::empty_eth66()) - } - - // 2. checks if announcement is spam packed with duplicate hashes - let original_len = msg.len(); - let partially_valid_data = msg.dedup(); - - ( - if partially_valid_data.len() == original_len { - FilterOutcome::Ok - } else { - FilterOutcome::ReportPeer - }, - partially_valid_data, - ) - } -} - -/// Filters valid entries in -/// [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) and -/// [`NewPooledTransactionHashes66`](reth_eth_wire::NewPooledTransactionHashes66) in place, and -/// flags misbehaving peers. -pub trait FilterAnnouncement { - /// Removes invalid entries from a - /// [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) announcement. - /// Returns [`FilterOutcome::ReportPeer`] if the caller should penalize the peer, otherwise - /// [`FilterOutcome::Ok`]. - fn filter_valid_entries_68( - &self, - msg: PartiallyValidData, - ) -> (FilterOutcome, ValidAnnouncementData); - - /// Removes invalid entries from a - /// [`NewPooledTransactionHashes66`](reth_eth_wire::NewPooledTransactionHashes66) announcement. - /// Returns [`FilterOutcome::ReportPeer`] if the caller should penalize the peer, otherwise - /// [`FilterOutcome::Ok`]. - fn filter_valid_entries_66( - &self, - msg: PartiallyValidData, - ) -> (FilterOutcome, ValidAnnouncementData); -} - -/// Outcome from filtering -/// [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68). Signals to caller -/// whether to penalize the sender of the announcement or not. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum FilterOutcome { - /// Peer behaves appropriately. - Ok, - /// A penalty should be flagged for the peer. Peer sent an announcement with unacceptably - /// invalid entries. - ReportPeer, -} - -/// Wrapper for types that implement [`FilterAnnouncement`]. The definition of a valid -/// announcement is network dependent. For example, different networks support different -/// [`TxType`]s, and different [`TxType`]s have different transaction size constraints. Defaults to -/// [`EthMessageFilter`]. 
-#[derive(Debug, Default, Deref, DerefMut)] -pub struct MessageFilter(N); - -/// Filter for announcements containing EIP [`TxType`]s. -#[derive(Debug, Default)] -pub struct EthMessageFilter { - announced_tx_types_metrics: AnnouncedTxTypesMetrics, -} - -impl Display for EthMessageFilter { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "EthMessageFilter") - } -} - -impl PartiallyFilterMessage for EthMessageFilter {} - -impl FilterAnnouncement for EthMessageFilter { - fn filter_valid_entries_68( - &self, - mut msg: PartiallyValidData, - ) -> (FilterOutcome, ValidAnnouncementData) { - trace!(target: "net::tx::validation", - msg=?*msg, - network=%self, - "validating eth68 announcement data.." - ); - - let mut should_report_peer = false; - let mut tx_types_counter = TxTypesCounter::default(); - - // checks if eth68 announcement metadata is valid - // - // transactions that are filtered out here, may not be spam, rather from benevolent peers - // that are unknowingly sending announcements with invalid data. - // - msg.retain(|hash, metadata| { - debug_assert!( - metadata.is_some(), - "metadata should exist for `%hash` in eth68 announcement passed to `%filter_valid_entries_68`, -`%hash`: {hash}" - ); - - let Some((ty, size)) = metadata else { - return false - }; - - // - // checks if tx type is valid value for this network - // - let tx_type = match TxType::try_from(*ty) { - Ok(ty) => ty, - Err(_) => { - trace!(target: "net::eth-wire", - ty=ty, - size=size, - hash=%hash, - network=%self, - "invalid tx type in eth68 announcement" - ); - - should_report_peer = true; - return false; - } - - }; - tx_types_counter.increase_by_tx_type(tx_type); - - true - }); - self.announced_tx_types_metrics.update_eth68_announcement_metrics(tx_types_counter); - ( - if should_report_peer { FilterOutcome::ReportPeer } else { FilterOutcome::Ok }, - ValidAnnouncementData::from_partially_valid_data(msg), - ) - } - - fn filter_valid_entries_66( - &self, - partially_valid_data: PartiallyValidData>, - ) -> (FilterOutcome, ValidAnnouncementData) { - trace!(target: "net::tx::validation", - hashes=?*partially_valid_data, - network=%self, - "validating eth66 announcement data.." 
- ); - - (FilterOutcome::Ok, ValidAnnouncementData::from_partially_valid_data(partially_valid_data)) - } -} - -#[cfg(test)] -mod test { - use super::*; - use alloy_primitives::B256; - use reth_eth_wire::{ - NewPooledTransactionHashes66, NewPooledTransactionHashes68, MAX_MESSAGE_SIZE, - }; - use std::{collections::HashMap, str::FromStr}; - - #[test] - fn eth68_empty_announcement() { - let types = vec![]; - let sizes = vec![]; - let hashes = vec![]; - - let announcement = NewPooledTransactionHashes68 { types, sizes, hashes }; - - let filter = EthMessageFilter::default(); - - let (outcome, _partially_valid_data) = filter.partially_filter_valid_entries(announcement); - - assert_eq!(outcome, FilterOutcome::ReportPeer); - } - - #[test] - fn eth68_announcement_unrecognized_tx_type() { - let types = vec![ - TxType::Eip7702 as u8 + 1, // the first type isn't valid - TxType::Legacy as u8, - ]; - let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; - let hashes = vec![ - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes68 { - types: types.clone(), - sizes: sizes.clone(), - hashes: hashes.clone(), - }; - - let filter = EthMessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - - assert_eq!(outcome, FilterOutcome::Ok); - - let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); - - assert_eq!(outcome, FilterOutcome::ReportPeer); - - let mut expected_data = HashMap::default(); - expected_data.insert(hashes[1], Some((types[1], sizes[1]))); - - assert_eq!(expected_data, valid_data.into_data()) - } - - #[test] - fn eth68_announcement_duplicate_tx_hash() { - let types = vec![ - TxType::Eip1559 as u8, - TxType::Eip4844 as u8, - TxType::Eip1559 as u8, - TxType::Eip4844 as u8, - ]; - let sizes = vec![1, 1, 1, MAX_MESSAGE_SIZE]; - // first three or the same - let hashes = vec![ - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // dup - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // removed dup - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // removed dup - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes68 { - types: types.clone(), - sizes: sizes.clone(), - hashes: hashes.clone(), - }; - - let filter = EthMessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - - assert_eq!(outcome, FilterOutcome::ReportPeer); - - let mut expected_data = HashMap::default(); - expected_data.insert(hashes[3], Some((types[3], sizes[3]))); - expected_data.insert(hashes[0], Some((types[0], sizes[0]))); - - assert_eq!(expected_data, partially_valid_data.into_data()) - } - - #[test] - fn eth66_empty_announcement() { - let hashes = vec![]; - - let announcement = NewPooledTransactionHashes66(hashes); - - let filter: MessageFilter = MessageFilter::default(); - - let (outcome, _partially_valid_data) = filter.partially_filter_valid_entries(announcement); - - assert_eq!(outcome, FilterOutcome::ReportPeer); - } - - #[test] - fn eth66_announcement_duplicate_tx_hash() { - // first three or the same - let hashes = vec![ - 
B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") // dup1 - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // dup2 - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // removed dup2 - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") // removed dup2 - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") // removed dup1 - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes66(hashes.clone()); - - let filter: MessageFilter = MessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - - assert_eq!(outcome, FilterOutcome::ReportPeer); - - let mut expected_data = HashMap::default(); - expected_data.insert(hashes[1], None); - expected_data.insert(hashes[0], None); - - assert_eq!(expected_data, partially_valid_data.into_data()) - } - - #[test] - fn eth68_announcement_eip7702_tx() { - let types = vec![TxType::Eip7702 as u8, TxType::Legacy as u8]; - let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; - let hashes = vec![ - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes68 { - types: types.clone(), - sizes: sizes.clone(), - hashes: hashes.clone(), - }; - - let filter = EthMessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - assert_eq!(outcome, FilterOutcome::Ok); - - let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); - assert_eq!(outcome, FilterOutcome::Ok); - - let mut expected_data = HashMap::default(); - expected_data.insert(hashes[0], Some((types[0], sizes[0]))); - expected_data.insert(hashes[1], Some((types[1], sizes[1]))); - - assert_eq!(expected_data, valid_data.into_data()); - } - - #[test] - fn eth68_announcement_eip7702_tx_size_validation() { - let types = vec![TxType::Eip7702 as u8, TxType::Eip7702 as u8, TxType::Eip7702 as u8]; - // Test with different sizes: too small, reasonable, too large - let sizes = vec![ - 1, // too small - MAX_MESSAGE_SIZE / 2, // reasonable size - MAX_MESSAGE_SIZE + 1, // too large - ]; - let hashes = vec![ - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcccc") - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes68 { - types: types.clone(), - sizes: sizes.clone(), - hashes: hashes.clone(), - }; - - let filter = EthMessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - assert_eq!(outcome, FilterOutcome::Ok); - - let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); - assert_eq!(outcome, FilterOutcome::Ok); - - let mut expected_data = HashMap::default(); - - for i in 0..3 { - expected_data.insert(hashes[i], Some((types[i], sizes[i]))); - } - - assert_eq!(expected_data, valid_data.into_data()); - } - - #[test] - fn eth68_announcement_mixed_tx_types() { - let types = vec![ - TxType::Legacy as u8, - TxType::Eip7702 as u8, - 
TxType::Eip1559 as u8, - TxType::Eip4844 as u8, - ]; - let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; - let hashes = vec![ - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcccc") - .unwrap(), - B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefdddd") - .unwrap(), - ]; - - let announcement = NewPooledTransactionHashes68 { - types: types.clone(), - sizes: sizes.clone(), - hashes: hashes.clone(), - }; - - let filter = EthMessageFilter::default(); - - let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); - assert_eq!(outcome, FilterOutcome::Ok); - - let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); - assert_eq!(outcome, FilterOutcome::Ok); - - let mut expected_data = HashMap::default(); - // All transaction types should be included as they are valid - for i in 0..4 { - expected_data.insert(hashes[i], Some((types[i], sizes[i]))); - } - - assert_eq!(expected_data, valid_data.into_data()); - } - - #[test] - fn test_display_for_zst() { - let filter = EthMessageFilter::default(); - assert_eq!("EthMessageFilter", &filter.to_string()); - } -} diff --git a/crates/net/network/src/trusted_peers_resolver.rs b/crates/net/network/src/trusted_peers_resolver.rs index 04fc7b6b5fd..29940f415ec 100644 --- a/crates/net/network/src/trusted_peers_resolver.rs +++ b/crates/net/network/src/trusted_peers_resolver.rs @@ -13,7 +13,7 @@ use tracing::warn; /// It returns a resolved (`PeerId`, `NodeRecord`) update when one of its in‑flight tasks completes. #[derive(Debug)] pub struct TrustedPeersResolver { - /// The timer that triggers a new resolution cycle. + /// The list of trusted peers to resolve. pub trusted_peers: Vec, /// The timer that triggers a new resolution cycle. pub interval: Interval, diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 1db378606ba..691ef5d379d 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -78,7 +78,9 @@ async fn test_large_tx_req() { // check all txs have been received match receive.await.unwrap() { Ok(PooledTransactions(txs)) => { - txs.into_iter().for_each(|tx| assert!(txs_hashes.contains(tx.hash()))); + for tx in txs { + assert!(txs_hashes.contains(tx.hash())); + } } Err(e) => { panic!("error: {e:?}"); diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index c97b9ab5385..90862c5144e 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -1,4 +1,5 @@ use std::{ + ops::RangeInclusive, pin::Pin, task::{ready, Context, Poll}, }; @@ -26,8 +27,26 @@ pub trait BodiesClient: DownloadClient { } /// Fetches the block body for the requested block with priority - fn get_block_bodies_with_priority(&self, hashes: Vec, priority: Priority) - -> Self::Output; + fn get_block_bodies_with_priority( + &self, + hashes: Vec, + priority: Priority, + ) -> Self::Output { + self.get_block_bodies_with_priority_and_range_hint(hashes, priority, None) + } + + /// Fetches the block body for the requested block with priority and a range hint for the + /// requested blocks. 
+ /// + /// The range hint is not required, but can be used to optimize the routing of the request if + /// the hashes are continuous or close together and the range hint is `[earliest, latest]` for + /// the requested blocks. + fn get_block_bodies_with_priority_and_range_hint( + &self, + hashes: Vec, + priority: Priority, + range_hint: Option>, + ) -> Self::Output; /// Fetches a single block body for the requested hash. fn get_block_body(&self, hash: B256) -> SingleBodyRequest { diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs index 3f1182bd482..a53592d8f9f 100644 --- a/crates/net/p2p/src/either.rs +++ b/crates/net/p2p/src/either.rs @@ -1,5 +1,7 @@ //! Support for different download types. +use std::ops::RangeInclusive; + use crate::{ bodies::client::BodiesClient, download::DownloadClient, @@ -37,14 +39,19 @@ where type Body = A::Body; type Output = Either; - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, hashes: Vec, priority: Priority, + range_hint: Option>, ) -> Self::Output { match self { - Self::Left(a) => Either::Left(a.get_block_bodies_with_priority(hashes, priority)), - Self::Right(b) => Either::Right(b.get_block_bodies_with_priority(hashes, priority)), + Self::Left(a) => Either::Left( + a.get_block_bodies_with_priority_and_range_hint(hashes, priority, range_hint), + ), + Self::Right(b) => Either::Right( + b.get_block_bodies_with_priority_and_range_hint(hashes, priority, range_hint), + ), } } } diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 33471866ad0..aa48f6c610e 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -20,6 +20,7 @@ use std::{ fmt::Debug, future::Future, hash::Hash, + ops::RangeInclusive, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -692,10 +693,11 @@ where /// # Returns /// /// A future containing an empty vector of block bodies and a randomly generated `PeerId`. - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, _hashes: Vec, _priority: Priority, + _range_hint: Option>, ) -> Self::Output { // Create a future that immediately returns an empty vector of block bodies and a random // PeerId. diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 9ceb223e887..d0cf6550ea1 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -80,8 +80,8 @@ impl SyncTarget { } /// Represents a gap to sync: from `local_head` to `target` -#[derive(Clone, Debug)] -pub struct HeaderSyncGap { +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderSyncGap { /// The local head block. Represents lower bound of sync range. 
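For reviewers, both call shapes of the bodies API side by side; `client` and `hashes` are assumed inputs, and `Priority::Normal` is the existing priority type from this crate.

```rust
use alloy_primitives::B256;
use reth_network_p2p::{bodies::client::BodiesClient, priority::Priority};

fn fetch<C: BodiesClient>(client: &C, hashes: Vec<B256>) {
    // Existing call sites compile unchanged; the default method now
    // forwards a `None` range hint.
    let _fut = client.get_block_bodies_with_priority(hashes.clone(), Priority::Normal);

    // Range-aware callers can hint that the hashes fall in 100..=120, so the
    // request can be routed to a peer whose announced range covers them.
    let _fut = client
        .get_block_bodies_with_priority_and_range_hint(hashes, Priority::Normal, Some(100..=120));
}
```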
pub local_head: SealedHeader, diff --git a/crates/net/p2p/src/snap/client.rs b/crates/net/p2p/src/snap/client.rs index 7f08da31e27..667824e448c 100644 --- a/crates/net/p2p/src/snap/client.rs +++ b/crates/net/p2p/src/snap/client.rs @@ -1,6 +1,9 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::Future; -use reth_eth_wire_types::snap::{AccountRangeMessage, GetAccountRangeMessage}; +use reth_eth_wire_types::snap::{ + AccountRangeMessage, GetAccountRangeMessage, GetByteCodesMessage, GetStorageRangesMessage, + GetTrieNodesMessage, +}; /// The snap sync downloader client #[auto_impl::auto_impl(&, Arc, Box)] @@ -21,4 +24,40 @@ pub trait SnapClient: DownloadClient { request: GetAccountRangeMessage, priority: Priority, ) -> Self::Output; + + /// Sends the storage ranges request to the p2p network and returns the storage ranges + /// response received from a peer. + fn get_storage_ranges(&self, request: GetStorageRangesMessage) -> Self::Output; + + /// Sends the storage ranges request to the p2p network with priority set and returns + /// the storage ranges response received from a peer. + fn get_storage_ranges_with_priority( + &self, + request: GetStorageRangesMessage, + priority: Priority, + ) -> Self::Output; + + /// Sends the byte codes request to the p2p network and returns the byte codes + /// response received from a peer. + fn get_byte_codes(&self, request: GetByteCodesMessage) -> Self::Output; + + /// Sends the byte codes request to the p2p network with priority set and returns + /// the byte codes response received from a peer. + fn get_byte_codes_with_priority( + &self, + request: GetByteCodesMessage, + priority: Priority, + ) -> Self::Output; + + /// Sends the trie nodes request to the p2p network and returns the trie nodes + /// response received from a peer. + fn get_trie_nodes(&self, request: GetTrieNodesMessage) -> Self::Output; + + /// Sends the trie nodes request to the p2p network with priority set and returns + /// the trie nodes response received from a peer. 
+ fn get_trie_nodes_with_priority( + &self, + request: GetTrieNodesMessage, + priority: Priority, + ) -> Self::Output; } diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index 7570756d0fd..63f5656538d 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -8,7 +8,10 @@ use alloy_primitives::B256; use futures::FutureExt; use reth_ethereum_primitives::BlockBody; use reth_network_peers::PeerId; -use std::fmt::{Debug, Formatter}; +use std::{ + fmt::{Debug, Formatter}, + ops::RangeInclusive, +}; use tokio::sync::oneshot; /// A test client for fetching bodies @@ -40,10 +43,11 @@ where type Body = BlockBody; type Output = BodiesFut; - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, hashes: Vec, _priority: Priority, + _range_hint: Option>, ) -> Self::Output { let (tx, rx) = oneshot::channel(); let _ = tx.send((self.responder)(hashes)); diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 0ef329ef7db..dce6a3f9f45 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -14,7 +14,7 @@ use reth_eth_wire_types::HeadersDirection; use reth_ethereum_primitives::{Block, BlockBody}; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives_traits::{SealedBlock, SealedHeader}; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, ops::RangeInclusive, sync::Arc}; /// A headers+bodies client that stores the headers and bodies in memory, with an artificial soft /// bodies response limit that is set to 20 by default. @@ -145,10 +145,11 @@ impl BodiesClient for TestFullBlockClient { /// # Returns /// /// A future containing the result of the block body retrieval operation. - fn get_block_bodies_with_priority( + fn get_block_bodies_with_priority_and_range_hint( &self, hashes: Vec, _priority: Priority, + _range_hint: Option>, ) -> Self::Output { // Acquire a lock on the bodies. 
let bodies = self.bodies.lock(); diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index b1b34773388..b19b29d58dc 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -141,7 +141,9 @@ impl Stream for TestDownload { let mut headers = resp.1.into_iter().skip(1).map(SealedHeader::seal_slow).collect::>(); headers.sort_unstable_by_key(|h| h.number); - headers.into_iter().for_each(|h| this.buffer.push(h)); + for h in headers { + this.buffer.push(h); + } this.done = true; } Err(err) => { diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 6e345a6fe26..c5f7c57c441 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -37,7 +37,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; use secp256k1::SecretKey; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use tracing::{info, trace, warn}; pub mod add_ons; @@ -563,13 +563,8 @@ where where EngineNodeLauncher: LaunchNode>, { - let Self { builder, task_executor } = self; - - let engine_tree_config = builder.config.engine.tree_config(); - - let launcher = - EngineNodeLauncher::new(task_executor, builder.config.datadir(), engine_tree_config); - builder.launch_with(launcher).await + let launcher = self.engine_api_launcher(); + self.builder.launch_with(launcher).await } /// Launches the node with the [`DebugNodeLauncher`]. @@ -594,6 +589,17 @@ where )); builder.launch_with(launcher).await } + + /// Returns an [`EngineNodeLauncher`] that can be used to launch the node with engine API + /// support. + pub fn engine_api_launcher(&self) -> EngineNodeLauncher { + let engine_tree_config = self.builder.config.engine.tree_config(); + EngineNodeLauncher::new( + self.task_executor.clone(), + self.builder.config.datadir(), + engine_tree_config, + ) + } } /// Captures the necessary context for building the components of the node. @@ -722,7 +728,7 @@ impl BuilderContext { > + Unpin + 'static, Node::Provider: BlockReaderFor, - Policy: TransactionPropagationPolicy, + Policy: TransactionPropagationPolicy + Debug, { let (handle, network, txpool, eth) = builder .transactions_with_policy(pool, tx_config, propagation_policy) diff --git a/crates/node/builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs index 2edc3b5e822..b587889e86f 100644 --- a/crates/node/builder/src/components/payload.rs +++ b/crates/node/builder/src/components/payload.rs @@ -4,9 +4,11 @@ use crate::{BuilderContext, FullNodeTypes}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::{NodeTypes, PayloadBuilderFor}; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand}; use reth_transaction_pool::TransactionPool; use std::future::Future; +use tokio::sync::{broadcast, mpsc}; +use tracing::warn; /// A type that knows how to spawn the payload service. pub trait PayloadServiceBuilder: @@ -110,3 +112,44 @@ where Ok(payload_service_handle) } } + +/// A `NoopPayloadServiceBuilder` useful for node implementations that are not implementing +/// validating/sequencing logic. 
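A sketch of where the builder defined just below is meant to slot in: a follow-only node hands it to the components builder in place of a real payload builder. The node type and the other `My*` component builders are hypothetical; only `NoopPayloadServiceBuilder` comes from this diff, and the method chaining follows the existing `ComponentsBuilder` API.

```rust
// Hypothetical follower node; every `My*` builder here is a stand-in.
let components = ComponentsBuilder::default()
    .node_types::<MyFollowerNode>()
    .pool(MyPoolBuilder::default())
    .executor(MyExecutorBuilder::default())
    .payload(NoopPayloadServiceBuilder::default())
    .network(MyNetworkBuilder::default())
    .consensus(MyConsensusBuilder::default());
```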
+#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] +pub struct NoopPayloadServiceBuilder; + +impl PayloadServiceBuilder for NoopPayloadServiceBuilder +where + Node: FullNodeTypes, + Pool: TransactionPool, + Evm: Send, +{ + async fn spawn_payload_builder_service( + self, + ctx: &BuilderContext, + _pool: Pool, + _evm_config: Evm, + ) -> eyre::Result::Payload>> { + let (tx, mut rx) = mpsc::unbounded_channel(); + + ctx.task_executor().spawn_critical("payload builder", async move { + #[allow(clippy::collection_is_never_read)] + let mut subscriptions = Vec::new(); + + while let Some(message) = rx.recv().await { + match message { + PayloadServiceCommand::Subscribe(tx) => { + let (events_tx, events_rx) = broadcast::channel(100); + // Retain senders to make sure that channels are not getting closed + subscriptions.push(events_tx); + let _ = tx.send(events_rx); + } + message => warn!(?message, "Noop payload service received a message"), + } + } + }); + + Ok(PayloadBuilderHandle::new(tx)) + } +} diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 5b08e0a7739..2d431831ee3 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,8 +1,12 @@ //! Pool component for the node builder. use alloy_primitives::Address; +use reth_chain_state::CanonStateSubscriptions; use reth_node_api::TxTy; -use reth_transaction_pool::{PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool}; +use reth_transaction_pool::{ + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, + TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, +}; use std::{collections::HashSet, future::Future}; use crate::{BuilderContext, FullNodeTypes}; @@ -98,3 +102,206 @@ impl PoolBuilderConfigOverrides { config } } + +/// A builder for creating transaction pools with common configuration options. +/// +/// This builder provides a fluent API for setting up transaction pools with various +/// configurations like blob stores, validators, and maintenance tasks. +pub struct TxPoolBuilder<'a, Node: FullNodeTypes, V = ()> { + ctx: &'a BuilderContext, + validator: V, +} + +impl<'a, Node: FullNodeTypes> TxPoolBuilder<'a, Node> { + /// Creates a new `TxPoolBuilder` with the given context. + pub const fn new(ctx: &'a BuilderContext) -> Self { + Self { ctx, validator: () } + } +} + +impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, V> { + /// Configure the validator for the transaction pool. + pub fn with_validator(self, validator: NewV) -> TxPoolBuilder<'a, Node, NewV> { + TxPoolBuilder { ctx: self.ctx, validator } + } +} + +impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> +where + V: TransactionValidator + Clone + 'static, + V::Transaction: + PoolTransaction> + reth_transaction_pool::EthPoolTransaction, +{ + /// Build the transaction pool and spawn its maintenance tasks. + /// This method creates the blob store, builds the pool, and spawns maintenance tasks. + pub fn build_and_spawn_maintenance_task( + self, + blob_store: DiskFileBlobStore, + pool_config: PoolConfig, + ) -> eyre::Result< + reth_transaction_pool::Pool< + TransactionValidationTaskExecutor, + CoinbaseTipOrdering, + DiskFileBlobStore, + >, + > { + // Destructure self to avoid partial move issues + let TxPoolBuilder { ctx, validator, .. 
} = self; + + let transaction_pool = reth_transaction_pool::Pool::new( + validator, + CoinbaseTipOrdering::default(), + blob_store, + pool_config.clone(), + ); + + // Spawn maintenance tasks using standalone functions + spawn_maintenance_tasks(ctx, transaction_pool.clone(), &pool_config)?; + + Ok(transaction_pool) + } +} + +/// Create blob store with default configuration. +pub fn create_blob_store( + ctx: &BuilderContext, +) -> eyre::Result { + let data_dir = ctx.config().datadir(); + Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open( + data_dir.blobstore(), + Default::default(), + )?) +} + +/// Create blob store with custom cache size configuration. +pub fn create_blob_store_with_cache( + ctx: &BuilderContext, + cache_size: Option, +) -> eyre::Result { + let data_dir = ctx.config().datadir(); + let config = if let Some(cache_size) = cache_size { + reth_transaction_pool::blobstore::DiskFileBlobStoreConfig::default() + .with_max_cached_entries(cache_size) + } else { + Default::default() + }; + + Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open(data_dir.blobstore(), config)?) +} + +/// Spawn local transaction backup task if enabled. +fn spawn_local_backup_task(ctx: &BuilderContext, pool: Pool) -> eyre::Result<()> +where + Node: FullNodeTypes, + Pool: TransactionPool + Clone + 'static, +{ + if !ctx.config().txpool.disable_transactions_backup { + let data_dir = ctx.config().datadir(); + let transactions_path = ctx + .config() + .txpool + .transactions_backup_path + .clone() + .unwrap_or_else(|| data_dir.txpool_transactions()); + + let transactions_backup_config = + reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup( + transactions_path, + ); + + ctx.task_executor().spawn_critical_with_graceful_shutdown_signal( + "local transactions backup task", + |shutdown| { + reth_transaction_pool::maintain::backup_local_transactions_task( + shutdown, + pool, + transactions_backup_config, + ) + }, + ); + } + Ok(()) +} + +/// Spawn the main maintenance task for transaction pool. +fn spawn_pool_maintenance_task( + ctx: &BuilderContext, + pool: Pool, + pool_config: &PoolConfig, +) -> eyre::Result<()> +where + Node: FullNodeTypes, + Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, + Pool::Transaction: PoolTransaction>, +{ + let chain_events = ctx.provider().canonical_state_stream(); + let client = ctx.provider().clone(); + + ctx.task_executor().spawn_critical( + "txpool maintenance task", + reth_transaction_pool::maintain::maintain_transaction_pool_future( + client, + pool, + chain_events, + ctx.task_executor().clone(), + reth_transaction_pool::maintain::MaintainPoolConfig { + max_tx_lifetime: pool_config.max_queued_lifetime, + no_local_exemptions: pool_config.local_transactions_config.no_exemptions, + ..Default::default() + }, + ), + ); + + Ok(()) +} + +/// Spawn all maintenance tasks for a transaction pool (backup + main maintenance). 
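+///
+/// This is the tail of the flow driven by [`TxPoolBuilder`]. A sketch of that flow, assuming a
+/// prepared `validator` and `pool_config` (names are illustrative):
+///
+/// ```ignore
+/// let blob_store = create_blob_store(ctx)?;
+/// let pool = TxPoolBuilder::new(ctx)
+///     .with_validator(validator)
+///     .build_and_spawn_maintenance_task(blob_store, pool_config)?;
+/// ```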
+fn spawn_maintenance_tasks( + ctx: &BuilderContext, + pool: Pool, + pool_config: &PoolConfig, +) -> eyre::Result<()> +where + Node: FullNodeTypes, + Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, + Pool::Transaction: PoolTransaction>, +{ + spawn_local_backup_task(ctx, pool.clone())?; + spawn_pool_maintenance_task(ctx, pool, pool_config)?; + Ok(()) +} + +impl std::fmt::Debug for TxPoolBuilder<'_, Node, V> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TxPoolBuilder").field("validator", &self.validator).finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_transaction_pool::PoolConfig; + + #[test] + fn test_pool_builder_config_overrides_apply() { + let base_config = PoolConfig::default(); + let overrides = PoolBuilderConfigOverrides { + pending_limit: Some(SubPoolLimit::default()), + max_account_slots: Some(100), + minimal_protocol_basefee: Some(1000), + ..Default::default() + }; + + let updated_config = overrides.apply(base_config); + assert_eq!(updated_config.max_account_slots, 100); + assert_eq!(updated_config.minimal_protocol_basefee, 1000); + } + + #[test] + fn test_pool_builder_config_overrides_default() { + let overrides = PoolBuilderConfigOverrides::default(); + assert!(overrides.pending_limit.is_none()); + assert!(overrides.max_account_slots.is_none()); + assert!(overrides.local_addresses.is_empty()); + } +} diff --git a/crates/node/builder/src/engine_api_ext.rs b/crates/node/builder/src/engine_api_ext.rs new file mode 100644 index 00000000000..936a2e19051 --- /dev/null +++ b/crates/node/builder/src/engine_api_ext.rs @@ -0,0 +1,45 @@ +//! `EngineApiBuilder` callback wrapper +//! +//! Wraps an `EngineApiBuilder` to provide access to the built Engine API instance. + +use crate::rpc::EngineApiBuilder; +use eyre::Result; +use reth_node_api::{AddOnsContext, FullNodeComponents}; +use reth_rpc_api::IntoEngineApiRpcModule; + +/// Provides access to an `EngineApi` instance with a callback +#[derive(Debug)] +pub struct EngineApiExt { + /// The inner builder that constructs the actual `EngineApi` + inner: B, + /// Optional callback function to execute with the built API + callback: Option, +} + +impl EngineApiExt { + /// Creates a new wrapper that calls `callback` when the API is built. + pub const fn new(inner: B, callback: F) -> Self { + Self { inner, callback: Some(callback) } + } +} + +impl EngineApiBuilder for EngineApiExt +where + B: EngineApiBuilder, + N: FullNodeComponents, + B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static, + F: FnOnce(B::EngineApi) + Send + Sync + 'static, +{ + type EngineApi = B::EngineApi; + + /// Builds the `EngineApi` and executes the callback if present. 
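+    ///
+    /// A sketch of how the wrapper can hand the built API to code outside the RPC server,
+    /// assuming an `inner` builder and a caller-created oneshot channel (both illustrative):
+    ///
+    /// ```ignore
+    /// let (tx, rx) = tokio::sync::oneshot::channel();
+    /// let wrapped = EngineApiExt::new(inner, move |api| {
+    ///     // Forward the EngineApi instance; the receiver can await it elsewhere.
+    ///     let _ = tx.send(api);
+    /// });
+    /// ```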
+ async fn build_engine_api(mut self, ctx: &AddOnsContext<'_, N>) -> Result { + let api = self.inner.build_engine_api(ctx).await?; + + if let Some(callback) = self.callback.take() { + callback(api.clone()); + } + + Ok(api) + } +} diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 342f1047274..fb289886e36 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -427,6 +427,7 @@ where NoopEvmConfig::::default(), self.toml_config().stages.clone(), self.prune_modes(), + None, )) .build( factory.clone(), diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 288cafe783e..025aaacf8f8 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -4,7 +4,7 @@ use alloy_consensus::BlockHeader; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; -use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; +use reth_engine_local::{LocalMiner, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -19,12 +19,14 @@ use reth_node_api::{ PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ + args::DefaultEraHost, dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; +use reth_stages::stages::EraImportSource; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -159,6 +161,18 @@ where // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); + + let era_import_source = if node_config.era.enabled { + EraImportSource::maybe_new( + node_config.era.source.path.clone(), + node_config.era.source.url.clone(), + || node_config.chain.chain().kind().default_era_host(), + || node_config.datadir().data_dir().join("era").into(), + ) + } else { + None + }; + let pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), @@ -171,6 +185,7 @@ where static_file_producer, ctx.components().evm_config().clone(), pipeline_exex_handle, + era_import_source, )?; // The new engine writes directly to static files. This ensures that they're up to the tip. @@ -218,46 +233,37 @@ where // during this run. 
.maybe_store_messages(node_config.debug.engine_api_store.clone()); - let mut engine_service = if ctx.is_dev() { - let eth_service = LocalEngineService::new( - consensus.clone(), - ctx.provider_factory().clone(), - ctx.blockchain_db().clone(), - pruner, - ctx.components().payload_builder_handle().clone(), - engine_payload_validator, - engine_tree_config, - ctx.invalid_block_hook()?, - ctx.sync_metrics_tx(), - consensus_engine_tx.clone(), - Box::pin(consensus_engine_stream), - ctx.dev_mining_mode(ctx.components().pool()), - LocalPayloadAttributesBuilder::new(ctx.chain_spec()), - ctx.components().evm_config().clone(), - ); + let mut engine_service = EngineService::new( + consensus.clone(), + ctx.chain_spec(), + network_client.clone(), + Box::pin(consensus_engine_stream), + pipeline, + Box::new(ctx.task_executor().clone()), + ctx.provider_factory().clone(), + ctx.blockchain_db().clone(), + pruner, + ctx.components().payload_builder_handle().clone(), + engine_payload_validator, + engine_tree_config, + ctx.invalid_block_hook()?, + ctx.sync_metrics_tx(), + ctx.components().evm_config().clone(), + ); - Either::Left(eth_service) - } else { - let eth_service = EngineService::new( - consensus.clone(), - ctx.chain_spec(), - network_client.clone(), - Box::pin(consensus_engine_stream), - pipeline, - Box::new(ctx.task_executor().clone()), - ctx.provider_factory().clone(), - ctx.blockchain_db().clone(), - pruner, - ctx.components().payload_builder_handle().clone(), - engine_payload_validator, - engine_tree_config, - ctx.invalid_block_hook()?, - ctx.sync_metrics_tx(), - ctx.components().evm_config().clone(), + if ctx.is_dev() { + ctx.task_executor().spawn_critical( + "local engine", + LocalMiner::new( + ctx.blockchain_db().clone(), + LocalPayloadAttributesBuilder::new(ctx.chain_spec()), + beacon_engine_handle.clone(), + ctx.dev_mining_mode(ctx.components().pool()), + ctx.components().payload_builder_handle().clone(), + ) + .run(), ); - - Either::Right(eth_service) - }; + } info!(target: "reth::cli", "Consensus engine initialized"); @@ -306,9 +312,7 @@ where ctx.task_executor().spawn_critical("consensus engine", async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, "start backfill sync"); - if let Either::Right(eth_service) = &mut engine_service { - eth_service.orchestrator_mut().start_backfill_sync(initial_target); - } + engine_service.orchestrator_mut().start_backfill_sync(initial_target); } let mut res = Ok(()); @@ -319,9 +323,7 @@ where payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload"); - if let Either::Right(eth_service) = &mut engine_service { - eth_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); - } + engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); } } event = engine_service.next() => { diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index d2e630231b3..b29e2c09dc0 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -18,6 +18,10 @@ pub mod hooks; pub mod node; pub use node::*; +/// Support for accessing the EngineApi outside the RPC server context. +mod engine_api_ext; +pub use engine_api_ext::EngineApiExt; + /// Support for configuring the components of a node. 
pub mod components;
pub use components::{NodeComponents, NodeComponentsBuilder};
diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs
index 98e8c01216a..90d9d7b8ac5 100644
--- a/crates/node/builder/src/rpc.rs
+++ b/crates/node/builder/src/rpc.rs
@@ -3,7 +3,6 @@
 use crate::{BeaconConsensusEngineEvent, BeaconConsensusEngineHandle};
 use alloy_rpc_types::engine::ClientVersionV1;
 use alloy_rpc_types_engine::ExecutionData;
-use futures::TryFutureExt;
 use jsonrpsee::RpcModule;
 use reth_chain_state::CanonStateSubscriptions;
 use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
@@ -21,7 +20,7 @@ use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule};
 use reth_rpc_builder::{
     auth::{AuthRpcModule, AuthServerHandle},
     config::RethRpcServerConfig,
-    RpcModuleBuilder, RpcRegistryInner, RpcServerHandle, TransportRpcModules,
+    RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, TransportRpcModules,
 };
 use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi};
 use reth_rpc_eth_types::{cache::cache_new_blocks_task, EthConfig, EthStateCache};
@@ -228,6 +227,17 @@ where
     }
 }
 
+/// Helper container for the parameters commonly passed to RPC module extension functions.
+#[expect(missing_debug_implementations)]
+pub struct RpcModuleContainer<'a, Node: FullNodeComponents, EthApi: EthApiTypes> {
+    /// Holds installed modules per transport type.
+    pub modules: &'a mut TransportRpcModules,
+    /// Holds the jwt authenticated rpc module.
+    pub auth_module: &'a mut AuthRpcModule,
+    /// A helper type that holds instances of the configured modules.
+    pub registry: &'a mut RpcRegistry<Node, EthApi>,
+}
+
 /// Helper container to encapsulate [`RpcRegistryInner`], [`TransportRpcModules`] and
 /// [`AuthRpcModule`].
 ///
@@ -357,6 +367,55 @@ impl RpcHandleProvider
+/// Handle returned when only the regular RPC server is launched.
+///
+/// This handle provides access to the regular RPC server and registry, but does not
+/// include the authenticated Engine API server. See also [`AuthServerOnlyHandle`].
+#[derive(Debug, Clone)]
+pub struct RpcServerOnlyHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
+    /// Handle to the RPC server
+    pub rpc_server_handle: RpcServerHandle,
+    /// Configured RPC modules.
+    pub rpc_registry: RpcRegistry<Node, EthApi>,
+    /// Notification channel for engine API events
+    pub engine_events:
+        EventSender<BeaconConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
+    /// Handle to the consensus engine.
+    pub engine_handle: BeaconConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
+}
+
+/// Handle returned when only the authenticated Engine API server is launched.
+///
+/// This handle provides access to the Engine API server and registry, but does not
+/// include the regular RPC servers (HTTP/WS/IPC). Use this for specialized setups
+/// that only need Engine API functionality.
+#[derive(Debug, Clone)]
+pub struct AuthServerOnlyHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
+    /// Handle to the auth server (engine API)
+    pub auth_server_handle: AuthServerHandle,
+    /// Configured RPC modules.
+    pub rpc_registry: RpcRegistry<Node, EthApi>,
+    /// Notification channel for engine API events
+    pub engine_events:
+        EventSender<BeaconConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
+    /// Handle to the consensus engine.
+    pub engine_handle: BeaconConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
+}
+
+/// Internal context struct for RPC setup shared between different launch methods
+struct RpcSetupContext<'a, Node: FullNodeComponents, EthApi: EthApiTypes> {
+    node: Node,
+    config: &'a NodeConfig<<Node::Types as NodeTypes>::ChainSpec>,
+    modules: TransportRpcModules,
+    auth_module: AuthRpcModule,
+    auth_config: reth_rpc_builder::auth::AuthServerConfig,
+    registry: RpcRegistry<Node, EthApi>,
+    on_rpc_started: Box<dyn OnRpcStarted<Node, EthApi>>,
+    engine_events: EventSender<BeaconConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
+    engine_handle: BeaconConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
+}
+
 /// Node add-ons containing RPC server configuration, with customizable eth API handler.
 ///
 /// This struct can be used to provide the RPC server functionality.
It is responsible for launching @@ -475,6 +534,55 @@ where EV: EngineValidatorBuilder, EB: EngineApiBuilder, { + /// Launches only the regular RPC server (HTTP/WS/IPC), without the authenticated Engine API + /// server. + /// + /// This is useful when you only need the regular RPC functionality and want to avoid + /// starting the auth server. + pub async fn launch_rpc_server( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, + { + let setup_ctx = self.setup_rpc_components(ctx, ext).await?; + let RpcSetupContext { + node, + config, + mut modules, + mut auth_module, + auth_config: _, + mut registry, + on_rpc_started, + engine_events, + engine_handle, + } = setup_ctx; + + let server_config = config.rpc.rpc_server_config(); + let rpc_server_handle = Self::launch_rpc_server_internal(server_config, &modules).await?; + + let handles = + RethRpcServerHandles { rpc: rpc_server_handle.clone(), auth: AuthServerHandle::noop() }; + Self::finalize_rpc_setup( + &mut registry, + &mut modules, + &mut auth_module, + &node, + config, + on_rpc_started, + handles, + )?; + + Ok(RpcServerOnlyHandle { + rpc_server_handle, + rpc_registry: registry, + engine_events, + engine_handle, + }) + } + /// Launches the RPC servers with the given context and an additional hook for extending /// modules. pub async fn launch_add_ons_with( @@ -483,11 +591,59 @@ where ext: F, ) -> eyre::Result> where - F: FnOnce( - &mut TransportRpcModules, - &mut AuthRpcModule, - &mut RpcRegistry, - ) -> eyre::Result<()>, + F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, + { + let setup_ctx = self.setup_rpc_components(ctx, ext).await?; + let RpcSetupContext { + node, + config, + mut modules, + mut auth_module, + auth_config, + mut registry, + on_rpc_started, + engine_events, + engine_handle, + } = setup_ctx; + + let server_config = config.rpc.rpc_server_config(); + let auth_module_clone = auth_module.clone(); + + // launch servers concurrently + let (rpc, auth) = futures::future::try_join( + Self::launch_rpc_server_internal(server_config, &modules), + Self::launch_auth_server_internal(auth_module_clone, auth_config), + ) + .await?; + + let handles = RethRpcServerHandles { rpc, auth }; + + Self::finalize_rpc_setup( + &mut registry, + &mut modules, + &mut auth_module, + &node, + config, + on_rpc_started, + handles.clone(), + )?; + + Ok(RpcHandle { + rpc_server_handles: handles, + rpc_registry: registry, + engine_events, + beacon_engine_handle: engine_handle, + }) + } + + /// Common setup for RPC server initialization + async fn setup_rpc_components<'a, F>( + self, + ctx: AddOnsContext<'a, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, { let Self { eth_api_builder, engine_api_builder, hooks, .. 
} = self; @@ -543,55 +699,78 @@ where let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; - ext(ctx.modules, ctx.auth_module, ctx.registry)?; + ext(RpcModuleContainer { + modules: ctx.modules, + auth_module: ctx.auth_module, + registry: ctx.registry, + })?; extend_rpc_modules.extend_rpc_modules(ctx)?; - let server_config = config.rpc.rpc_server_config(); - let cloned_modules = modules.clone(); - let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { - if let Some(path) = handle.ipc_endpoint() { - info!(target: "reth::cli", %path, "RPC IPC server started"); - } - if let Some(addr) = handle.http_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); - } - if let Some(addr) = handle.ws_local_addr() { - info!(target: "reth::cli", url=%addr, "RPC WS server started"); - } - handle - }); - - let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { - let addr = handle.local_addr(); - if let Some(ipc_endpoint) = handle.ipc_endpoint() { - info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint, "RPC auth server started"); - } else { - info!(target: "reth::cli", url=%addr, "RPC auth server started"); - } - handle - }); - - // launch servers concurrently - let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?; + Ok(RpcSetupContext { + node, + config, + modules, + auth_module, + auth_config, + registry, + on_rpc_started, + engine_events, + engine_handle: beacon_engine_handle, + }) + } - let handles = RethRpcServerHandles { rpc, auth }; + /// Helper to launch the RPC server + async fn launch_rpc_server_internal( + server_config: RpcServerConfig, + modules: &TransportRpcModules, + ) -> eyre::Result { + let handle = server_config.start(modules).await?; - let ctx = RpcContext { - node: node.clone(), - config, - registry: &mut registry, - modules: &mut modules, - auth_module: &mut auth_module, - }; + if let Some(path) = handle.ipc_endpoint() { + info!(target: "reth::cli", %path, "RPC IPC server started"); + } + if let Some(addr) = handle.http_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC HTTP server started"); + } + if let Some(addr) = handle.ws_local_addr() { + info!(target: "reth::cli", url=%addr, "RPC WS server started"); + } - on_rpc_started.on_rpc_started(ctx, handles.clone())?; + Ok(handle) + } + + /// Helper to launch the auth server + async fn launch_auth_server_internal( + auth_module: AuthRpcModule, + auth_config: reth_rpc_builder::auth::AuthServerConfig, + ) -> eyre::Result { + auth_module.start_server(auth_config) + .await + .map_err(Into::into) + .inspect(|handle| { + let addr = handle.local_addr(); + if let Some(ipc_endpoint) = handle.ipc_endpoint() { + info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint, "RPC auth server started"); + } else { + info!(target: "reth::cli", url=%addr, "RPC auth server started"); + } + }) + } + + /// Helper to finalize RPC setup by creating context and calling hooks + fn finalize_rpc_setup( + registry: &mut RpcRegistry, + modules: &mut TransportRpcModules, + auth_module: &mut AuthRpcModule, + node: &N, + config: &NodeConfig<::ChainSpec>, + on_rpc_started: Box>, + handles: RethRpcServerHandles, + ) -> eyre::Result<()> { + let ctx = RpcContext { node: node.clone(), config, registry, modules, auth_module }; - Ok(RpcHandle { - rpc_server_handles: handles, - rpc_registry: registry, - engine_events, - beacon_engine_handle, - }) + on_rpc_started.on_rpc_started(ctx, handles)?; + Ok(()) } } @@ -606,7 +785,7 @@ where type Handle = 
RpcHandle; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { - self.launch_add_ons_with(ctx, |_, _, _| Ok(())).await + self.launch_add_ons_with(ctx, |_| Ok(())).await } } diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 255a844c93f..a4099691191 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -17,7 +17,11 @@ use reth_network_p2p::{ }; use reth_node_api::HeaderTy; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; -use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; +use reth_stages::{ + prelude::DefaultStages, + stages::{EraImportSource, ExecutionStage}, + Pipeline, StageSet, +}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::debug; @@ -37,6 +41,7 @@ pub fn build_networked_pipeline( static_file_producer: StaticFileProducer>, evm_config: Evm, exex_manager_handle: ExExManagerHandle, + era_import_source: Option, ) -> eyre::Result> where N: ProviderNodeTypes, @@ -64,6 +69,7 @@ where static_file_producer, evm_config, exex_manager_handle, + era_import_source, )?; Ok(pipeline) @@ -83,6 +89,7 @@ pub fn build_pipeline( static_file_producer: StaticFileProducer>, evm_config: Evm, exex_manager_handle: ExExManagerHandle, + era_import_source: Option, ) -> eyre::Result> where N: ProviderNodeTypes, @@ -114,6 +121,7 @@ where evm_config.clone(), stage_config.clone(), prune_modes, + era_import_source, ) .set(ExecutionStage::new( evm_config, diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 677fdb7980e..3aff3175717 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -52,6 +52,7 @@ toml.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } thiserror.workspace = true +url.workspace = true # io dirs-next.workspace = true diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 783e37dcfa6..8c03e42d9f2 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -25,8 +25,9 @@ pub struct EngineArgs { pub legacy_state_root_task_enabled: bool, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-caching-and-prewarming - /// if you want to disable caching and prewarming. 
- #[arg(long = "engine.caching-and-prewarming", default_value = "true")] + /// if you want to disable caching and prewarming + #[arg(long = "engine.caching-and-prewarming", default_value = "true", hide = true)] + #[deprecated] pub caching_and_prewarming_enabled: bool, /// Disable cross-block caching and parallel prewarming @@ -60,11 +61,33 @@ pub struct EngineArgs { #[arg(long = "engine.reserved-cpu-cores", default_value_t = DEFAULT_RESERVED_CPU_CORES)] pub reserved_cpu_cores: usize, - /// Enable precompile cache - #[arg(long = "engine.precompile-cache", default_value = "false")] + /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-precompile-cache + /// if you want to disable precompile cache + #[arg(long = "engine.precompile-cache", default_value = "true", hide = true)] + #[deprecated] pub precompile_cache_enabled: bool, + + /// Disable precompile cache + #[arg(long = "engine.disable-precompile-cache", default_value = "false")] + pub precompile_cache_disabled: bool, + + /// Enable state root fallback, useful for testing + #[arg(long = "engine.state-root-fallback", default_value = "false")] + pub state_root_fallback: bool, + + /// Always process payload attributes and begin a payload build process even if + /// `forkchoiceState.headBlockHash` is already the canonical head or an ancestor. See + /// `TreeConfig::always_process_payload_attributes_on_canonical_head` for more details. + /// + /// Note: This is a no-op on OP Stack. + #[arg( + long = "engine.always-process-payload-attributes-on-canonical-head", + default_value = "false" + )] + pub always_process_payload_attributes_on_canonical_head: bool, } +#[allow(deprecated)] impl Default for EngineArgs { fn default() -> Self { Self { @@ -79,7 +102,10 @@ impl Default for EngineArgs { accept_execution_requests_hash: false, max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, - precompile_cache_enabled: false, + precompile_cache_enabled: true, + precompile_cache_disabled: false, + state_root_fallback: false, + always_process_payload_attributes_on_canonical_head: false, } } } @@ -97,7 +123,11 @@ impl EngineArgs { .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) .with_max_proof_task_concurrency(self.max_proof_task_concurrency) .with_reserved_cpu_cores(self.reserved_cpu_cores) - .with_precompile_cache_enabled(self.precompile_cache_enabled) + .without_precompile_cache(self.precompile_cache_disabled) + .with_state_root_fallback(self.state_root_fallback) + .with_always_process_payload_attributes_on_canonical_head( + self.always_process_payload_attributes_on_canonical_head, + ) } } diff --git a/crates/node/core/src/args/era.rs b/crates/node/core/src/args/era.rs new file mode 100644 index 00000000000..84e835c370a --- /dev/null +++ b/crates/node/core/src/args/era.rs @@ -0,0 +1,62 @@ +use clap::Args; +use reth_chainspec::{ChainKind, NamedChain}; +use std::path::Path; +use url::Url; + +/// Syncs ERA1 encoded blocks from a local or remote source. +#[derive(Clone, Debug, Default, Args)] +pub struct EraArgs { + /// Enable import from ERA1 files. + #[arg( + id = "era.enable", + long = "era.enable", + value_name = "ERA_ENABLE", + default_value_t = false + )] + pub enabled: bool, + + /// Describes where to get the ERA files to import from. + #[clap(flatten)] + pub source: EraSourceArgs, +} + +/// Arguments for the block history import based on ERA1 encoded files. 
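+///
+/// The two sources are mutually exclusive. A usage sketch (the local path is illustrative):
+///
+/// ```text
+/// reth node --era.enable --era.path /var/lib/reth/era1
+/// reth node --era.enable --era.url https://era.ithaca.xyz/era1/index.html
+/// ```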
+#[derive(Clone, Debug, Default, Args)]
+#[group(required = false, multiple = false)]
+pub struct EraSourceArgs {
+    /// The path to a directory to import from.
+    ///
+    /// The ERA1 files are read from the local directory, parsing headers and bodies.
+    #[arg(long = "era.path", value_name = "ERA_PATH", verbatim_doc_comment)]
+    pub path: Option<Box<Path>>,
+
+    /// The URL to a remote host where the ERA1 files are hosted.
+    ///
+    /// The ERA1 files are read from the remote host using HTTP GET requests, parsing headers
+    /// and bodies.
+    #[arg(long = "era.url", value_name = "ERA_URL", verbatim_doc_comment)]
+    pub url: Option<Url>,
+}
+
+/// The `DefaultEraHost` trait allows deriving a default URL host for ERA files.
+pub trait DefaultEraHost {
+    /// Converts `self` into the [`Url`] of the ERA host's index page.
+    ///
+    /// Returns `None` if the conversion is not possible.
+    fn default_era_host(&self) -> Option<Url>;
+}
+
+impl DefaultEraHost for ChainKind {
+    fn default_era_host(&self) -> Option<Url> {
+        Some(match self {
+            Self::Named(NamedChain::Mainnet) => {
+                Url::parse("https://era.ithaca.xyz/era1/index.html").expect("URL should be valid")
+            }
+            Self::Named(NamedChain::Sepolia) => {
+                Url::parse("https://era.ithaca.xyz/sepolia-era1/index.html")
+                    .expect("URL should be valid")
+            }
+            _ => return None,
+        })
+    }
+}
diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs
index 3a5e55ce292..6799fe418dc 100644
--- a/crates/node/core/src/args/mod.rs
+++ b/crates/node/core/src/args/mod.rs
@@ -64,5 +64,9 @@ pub use engine::EngineArgs;
 mod ress_args;
 pub use ress_args::RessArgs;
 
+/// `EraArgs` for configuring ERA file import.
+mod era;
+pub use era::{DefaultEraHost, EraArgs, EraSourceArgs};
+
 mod error;
 pub mod types;
diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs
index c11e927677f..2f5908aaf46 100644
--- a/crates/node/core/src/args/network.rs
+++ b/crates/node/core/src/args/network.rs
@@ -441,7 +441,7 @@ impl DiscoveryArgs {
             network_config_builder = network_config_builder.disable_nat();
         }
 
-        if !self.disable_discovery && self.enable_discv5_discovery {
+        if self.should_enable_discv5() {
             network_config_builder = network_config_builder
                 .discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes));
         }
@@ -490,6 +490,17 @@ impl DiscoveryArgs {
             .bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown)
     }
 
+    /// Returns `true` if discv5 discovery should be configured.
+    const fn should_enable_discv5(&self) -> bool {
+        if self.disable_discovery {
+            return false;
+        }
+
+        self.enable_discv5_discovery ||
+            self.discv5_addr.is_some() ||
+            self.discv5_addr_ipv6.is_some()
+    }
+
     /// Set the discovery port to zero, to allow the OS to assign a random unused port when
     /// discovery binds to the socket.
pub const fn with_unused_discovery_port(mut self) -> Self { diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index cbdeccd649a..ca9ebedcc5d 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -42,9 +42,9 @@ pub trait PayloadBuilderConfig { } match chain.kind() { - ChainKind::Named(NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi) => { - ETHEREUM_BLOCK_GAS_LIMIT_60M - } + ChainKind::Named( + NamedChain::Mainnet | NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi, + ) => ETHEREUM_BLOCK_GAS_LIMIT_60M, _ => ETHEREUM_BLOCK_GAS_LIMIT_36M, } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index a3bd7e1dc78..e94256556cf 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -31,6 +31,7 @@ use std::{ }; use tracing::*; +use crate::args::EraArgs; pub use reth_engine_primitives::{ DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_RESERVED_CPU_CORES, @@ -148,6 +149,9 @@ pub struct NodeConfig { /// All engine related arguments pub engine: EngineArgs, + + /// All ERA import related arguments with --era prefix + pub era: EraArgs, } impl NodeConfig { @@ -177,6 +181,7 @@ impl NodeConfig { pruning: PruningArgs::default(), datadir: DatadirArgs::default(), engine: EngineArgs::default(), + era: EraArgs::default(), } } @@ -479,6 +484,7 @@ impl NodeConfig { dev: self.dev, pruning: self.pruning, engine: self.engine, + era: self.era, } } } @@ -506,6 +512,7 @@ impl Clone for NodeConfig { pruning: self.pruning.clone(), datadir: self.datadir.clone(), engine: self.engine.clone(), + era: self.era.clone(), } } } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index cb54bb70396..10173bafdda 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -280,6 +280,9 @@ impl NodeState { BeaconConsensusEngineEvent::InvalidBlock(block) => { warn!(number=block.number(), hash=?block.hash(), "Encountered invalid block"); } + BeaconConsensusEngineEvent::BlockReceived(num_hash) => { + info!(number=num_hash.number, hash=?num_hash.hash, "Received block from consensus engine"); + } } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 96918f90f1a..96a466f7f65 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -59,7 +59,7 @@ impl MetricServer { task_executor.clone(), ) .await - .wrap_err("Could not start Prometheus endpoint")?; + .wrap_err_with(|| format!("Could not start Prometheus endpoint at {listen_addr}"))?; // Describe metrics after recorder installation describe_db_metrics(); diff --git a/crates/optimism/chainspec/src/superchain/chain_spec_macro.rs b/crates/optimism/chainspec/src/superchain/chain_spec_macro.rs index 61c58a88f16..65fdb5dfaae 100644 --- a/crates/optimism/chainspec/src/superchain/chain_spec_macro.rs +++ b/crates/optimism/chainspec/src/superchain/chain_spec_macro.rs @@ -73,7 +73,6 @@ macro_rules! create_superchain_specs { /// All supported superchains, including both older and newer naming, /// for backwards compatibility pub const SUPPORTED_CHAINS: &'static [&'static str] = &[ - "dev", "optimism", "optimism_sepolia", "optimism-sepolia", @@ -83,6 +82,7 @@ macro_rules! create_superchain_specs { $( $crate::key_for!($name, $env), )+ + "dev", ]; /// Parses the chain into an [`$crate::OpChainSpec`], if recognized. 
diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 850f2b65bfa..da574239d5b 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -1,5 +1,6 @@ //! Command that initializes the node from a genesis file. +use alloy_consensus::Header; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; @@ -58,6 +59,7 @@ impl> InitStateCommandOp { &provider_rw, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), BEDROCK_HEADER_TTD, + |number| Header { number, ..Default::default() }, )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 9764c921824..9acef67dabe 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -27,8 +27,8 @@ op-alloy-consensus.workspace = true alloy-consensus.workspace = true # Optimism -reth-optimism-consensus.workspace = true reth-optimism-chainspec.workspace = true +reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true reth-optimism-primitives.workspace = true @@ -42,7 +42,6 @@ thiserror.workspace = true [dev-dependencies] reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } -reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 43ffae09125..841c5e4603d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -5,7 +5,7 @@ pub type OpExecutorProvider = crate::OpEvmConfig; #[cfg(test)] mod tests { - use crate::{OpChainSpec, OpEvmConfig, OpRethReceiptBuilder}; + use crate::{OpEvmConfig, OpRethReceiptBuilder}; use alloc::sync::Arc; use alloy_consensus::{Block, BlockBody, Header, SignableTransaction, TxEip1559}; use alloy_primitives::{b256, Address, Signature, StorageKey, StorageValue, U256}; @@ -13,7 +13,7 @@ mod tests { use op_revm::constants::L1_BLOCK_CONTRACT; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::{execute::Executor, ConfigureEvm}; - use reth_optimism_chainspec::OpChainSpecBuilder; + use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_primitives_traits::{Account, RecoveredBlock}; use reth_revm::{database::StateProviderDatabase, test_utils::StateProviderTest}; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 9177e1a28a1..523bd49de79 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -72,14 +72,14 @@ impl Clone for OpEvmConfig OpEvmConfig { +impl OpEvmConfig { /// Creates a new [`OpEvmConfig`] with the given chain spec for OP chains. pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec, OpRethReceiptBuilder::default()) } } -impl OpEvmConfig { +impl OpEvmConfig { /// Creates a new [`OpEvmConfig`] with the given chain spec. 
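+    ///
+    /// For example (a sketch; `chain_spec` stands for an existing chain spec `Arc`):
+    ///
+    /// ```ignore
+    /// let evm_config = OpEvmConfig::new(chain_spec, OpRethReceiptBuilder::default());
+    /// ```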
pub fn new(chain_spec: Arc, receipt_builder: R) -> Self { Self { @@ -227,7 +227,7 @@ mod tests { use reth_execution_types::{ AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, }; - use reth_optimism_chainspec::BASE_MAINNET; + use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET}; use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; use reth_primitives_traits::{Account, RecoveredBlock}; use revm::{ diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 718fc5358ee..63de9ec3291 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -37,6 +37,7 @@ reth-rpc-api.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-evm.workspace = true reth-optimism-rpc.workspace = true +reth-optimism-storage.workspace = true reth-optimism-txpool.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus = { workspace = true, features = ["std"] } @@ -94,9 +95,7 @@ asm-keccak = [ "reth-optimism-node/asm-keccak", "reth-node-core/asm-keccak", ] -js-tracer = [ - "reth-node-builder/js-tracer", -] +js-tracer = ["reth-node-builder/js-tracer"] test-utils = [ "reth-tasks", "reth-e2e-test-utils", @@ -119,6 +118,4 @@ test-utils = [ "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", ] -reth-codec = [ - "reth-optimism-primitives/reth-codec", -] +reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 703313aabcd..3276abf2e78 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -59,6 +59,10 @@ pub struct RollupArgs { /// Optional headers to use when connecting to the sequencer. #[arg(long = "rollup.sequencer-headers", requires = "sequencer")] pub sequencer_headers: Vec, + + /// Minimum suggested priority fee (tip) in wei, default `1_000_000` + #[arg(long, default_value_t = 1_000_000)] + pub min_suggested_priority_fee: u64, } impl Default for RollupArgs { @@ -73,6 +77,7 @@ impl Default for RollupArgs { supervisor_http: DEFAULT_SUPERVISOR_URL.to_string(), supervisor_safety_level: SafetyLevel::CrossUnsafe, sequencer_headers: Vec::new(), + min_suggested_priority_fee: 1_000_000, } } } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 0e17d31b787..bba734ae8fd 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -5,7 +5,6 @@ use op_alloy_rpc_types_engine::{ OpExecutionData, OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, }; -use reth_chainspec::ChainSpec; use reth_consensus::ConsensusError; use reth_node_api::{ payload::{ @@ -16,12 +15,11 @@ use reth_node_api::{ validate_version_specific_fields, BuiltPayload, EngineTypes, EngineValidator, NodePrimitives, PayloadValidator, }; -use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::isthmus; -use reth_optimism_forks::{OpHardfork, OpHardforks}; +use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{OpExecutionPayloadValidator, OpPayloadTypes}; use reth_optimism_primitives::{OpBlock, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignedTransaction}; +use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock, SignedTransaction}; use reth_provider::StateProviderFactory; use reth_trie_common::{HashedPostState, KeyHasher}; use std::{marker::PhantomData, sync::Arc}; @@ -30,16 +28,10 @@ use std::{marker::PhantomData, 
sync::Arc}; #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] pub struct OpEngineTypes { - _marker: std::marker::PhantomData, + _marker: PhantomData, } -impl< - T: PayloadTypes< - ExecutionData = OpExecutionData, - BuiltPayload: BuiltPayload>, - >, - > PayloadTypes for OpEngineTypes -{ +impl> PayloadTypes for OpEngineTypes { type ExecutionData = T::ExecutionData; type BuiltPayload = T::BuiltPayload; type PayloadAttributes = T::PayloadAttributes; @@ -50,7 +42,10 @@ impl< <::Primitives as NodePrimitives>::Block, >, ) -> ::ExecutionData { - OpExecutionData::from_block_unchecked(block.hash(), &block.into_block()) + OpExecutionData::from_block_unchecked( + block.hash(), + &block.into_block().into_ethereum_block(), + ) } } @@ -70,17 +65,17 @@ where } /// Validator for Optimism engine API. -#[derive(Debug, Clone)] -pub struct OpEngineValidator { - inner: OpExecutionPayloadValidator, +#[derive(Debug)] +pub struct OpEngineValidator { + inner: OpExecutionPayloadValidator, provider: P, hashed_addr_l2tol1_msg_passer: B256, phantom: PhantomData, } -impl OpEngineValidator { +impl OpEngineValidator { /// Instantiates a new validator. - pub fn new(chain_spec: Arc, provider: P) -> Self { + pub fn new(chain_spec: Arc, provider: P) -> Self { let hashed_addr_l2tol1_msg_passer = KH::hash_key(ADDRESS_L2_TO_L1_MESSAGE_PASSER); Self { inner: OpExecutionPayloadValidator::new(chain_spec), @@ -89,19 +84,39 @@ impl OpEngineValidator { phantom: PhantomData, } } +} + +impl Clone for OpEngineValidator +where + P: Clone, + ChainSpec: OpHardforks, +{ + fn clone(&self) -> Self { + Self { + inner: OpExecutionPayloadValidator::new(self.inner.clone()), + provider: self.provider.clone(), + hashed_addr_l2tol1_msg_passer: self.hashed_addr_l2tol1_msg_passer, + phantom: Default::default(), + } + } +} +impl OpEngineValidator +where + ChainSpec: OpHardforks, +{ /// Returns the chain spec used by the validator. 
#[inline] - #[allow(clippy::missing_const_for_fn)] - fn chain_spec(&self) -> &OpChainSpec { + pub fn chain_spec(&self) -> &ChainSpec { self.inner.chain_spec() } } -impl PayloadValidator for OpEngineValidator +impl PayloadValidator for OpEngineValidator where P: StateProviderFactory + Unpin + 'static, Tx: SignedTransaction + Unpin + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, { type Block = alloy_consensus::Block; type ExecutionData = OpExecutionData; @@ -146,7 +161,7 @@ where } } -impl EngineValidator for OpEngineValidator +impl EngineValidator for OpEngineValidator where Types: PayloadTypes< PayloadAttributes = OpPayloadAttributes, @@ -154,7 +169,8 @@ where BuiltPayload: BuiltPayload>, >, P: StateProviderFactory + Unpin + 'static, - Tx: SignedTransaction + Unpin + 'static + Send + Sync, + Tx: SignedTransaction + Unpin + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, { fn validate_version_specific_fields( &self, @@ -197,7 +213,7 @@ where if attributes.gas_limit.is_none() { return Err(EngineObjectValidationError::InvalidParams( "MissingGasLimitInPayloadAttributes".to_string().into(), - )) + )); } if self @@ -213,7 +229,7 @@ where if elasticity != 0 && denominator == 0 { return Err(EngineObjectValidationError::InvalidParams( "Eip1559ParamsDenominatorZero".to_string().into(), - )) + )); } } @@ -229,23 +245,23 @@ where /// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: /// pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, + chain_spec: impl OpHardforks, version: EngineApiMessageVersion, message_validation_kind: MessageValidationKind, timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); + let is_shanghai = chain_spec.is_canyon_active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { if has_withdrawals { return Err(message_validation_kind - .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) + .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)); } if is_shanghai { return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)); } } EngineApiMessageVersion::V2 | @@ -254,11 +270,11 @@ pub fn validate_withdrawals_presence( EngineApiMessageVersion::V5 => { if is_shanghai && !has_withdrawals { return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)); } if !is_shanghai && has_withdrawals { return Err(message_validation_kind - .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) + .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)); } } }; @@ -273,8 +289,9 @@ mod test { use crate::engine; use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; + use reth_chainspec::ChainSpec; use reth_node_builder::EngineValidator; - use reth_optimism_chainspec::BASE_SEPOLIA; + use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; @@ -317,10 +334,10 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(None, 1732633199); - let result = as EngineValidator< + let result = as EngineValidator< OpEngineTypes, 
>>::ensure_well_formed_attributes( - &validator, EngineApiMessageVersion::V3, &attributes + &validator, EngineApiMessageVersion::V3, &attributes, ); assert!(result.is_ok()); } @@ -331,10 +348,10 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(None, 1732633200); - let result = as EngineValidator< + let result = as EngineValidator< OpEngineTypes, >>::ensure_well_formed_attributes( - &validator, EngineApiMessageVersion::V3, &attributes + &validator, EngineApiMessageVersion::V3, &attributes, ); assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); } @@ -345,10 +362,10 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000000000008")), 1732633200); - let result = as EngineValidator< + let result = as EngineValidator< OpEngineTypes, >>::ensure_well_formed_attributes( - &validator, EngineApiMessageVersion::V3, &attributes + &validator, EngineApiMessageVersion::V3, &attributes, ); assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); } @@ -359,10 +376,10 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000800000008")), 1732633200); - let result = as EngineValidator< + let result = as EngineValidator< OpEngineTypes, >>::ensure_well_formed_attributes( - &validator, EngineApiMessageVersion::V3, &attributes + &validator, EngineApiMessageVersion::V3, &attributes, ); assert!(result.is_ok()); } @@ -373,10 +390,10 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000000000000")), 1732633200); - let result = as EngineValidator< + let result = as EngineValidator< OpEngineTypes, >>::ensure_well_formed_attributes( - &validator, EngineApiMessageVersion::V3, &attributes + &validator, EngineApiMessageVersion::V3, &attributes, ); assert!(result.is_ok()); } diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index fc57365b460..ac9cfe98d83 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -18,9 +18,10 @@ pub mod args; /// trait. 
pub mod engine; pub use engine::OpEngineTypes; +pub use reth_optimism_payload_builder::{OpPayloadPrimitives, OpPayloadTypes}; pub mod node; -pub use node::{OpNetworkPrimitives, OpNode}; +pub use node::*; pub mod rpc; pub use rpc::OpEngineApiBuilder; @@ -39,3 +40,5 @@ pub use reth_optimism_payload_builder::{ }; pub use reth_optimism_evm::*; + +pub use reth_optimism_storage::OpStorage; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 50d1500066a..4eb76160a3b 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,24 +7,27 @@ use crate::{ OpEngineApiBuilder, OpEngineTypes, }; use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction}; -use reth_chainspec::{EthChainSpec, Hardforks}; +use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadAttributes}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; use reth_network::{ - primitives::NetPrimitivesFor, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, + types::BasicNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, PeersInfo, }; use reth_node_api::{ - AddOnsContext, FullNodeComponents, KeyHasherTy, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, + AddOnsContext, EngineTypes, FullNodeComponents, KeyHasherTy, NodeAddOns, NodePrimitives, + PayloadTypes, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ BasicPayloadServiceBuilder, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, PayloadBuilderBuilder, PoolBuilder, PoolBuilderConfigOverrides, + TxPoolBuilder, }, node::{FullNodeTypes, NodeTypes}, rpc::{ - EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, RethRpcAddOns, RpcAddOns, - RpcHandle, + EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, + RethRpcAddOns, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle, }, BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, }; @@ -35,26 +38,28 @@ use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, config::{OpBuilderConfig, OpDAConfig}, + OpBuiltPayload, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; -use reth_optimism_primitives::{DepositReceipt, OpPrimitives, OpReceipt, OpTransactionSigned}; +use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_optimism_rpc::{ eth::{ext::OpEthExtApi, OpEthApiBuilder}, miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, OpEthApi, OpEthApiError, SequencerClient, }; +use reth_optimism_storage::OpStorage; use reth_optimism_txpool::{ supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL}, OpPooledTx, }; -use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, EthStorage}; +use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ext::L2EthApiExtServer, FullEthApiServer}; use reth_rpc_eth_types::error::FromEvmError; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, EthPoolTransaction, PoolTransaction, + blobstore::DiskFileBlobStore, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; @@ -63,16 +68,18 @@ use std::{marker::PhantomData, sync::Arc}; /// 
Marker trait for Optimism node types with standard engine, chain spec, and primitives. pub trait OpNodeTypes: - NodeTypes + NodeTypes { } /// Blanket impl for all node types that conform to the Optimism spec. impl OpNodeTypes for N where - N: NodeTypes + N: NodeTypes< + Payload = OpEngineTypes, + ChainSpec: OpHardforks + Hardforks, + Primitives = OpPrimitives, + > { } -/// Storage implementation for Optimism. -pub type OpStorage = EthStorage; /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] @@ -89,6 +96,16 @@ pub struct OpNode { pub da_config: OpDAConfig, } +/// A [`ComponentsBuilder`] with its generic arguments set to a stack of Optimism specific builders. +pub type OpNodeComponentBuilder = ComponentsBuilder< + Node, + OpPoolBuilder, + BasicPayloadServiceBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, +>; + impl OpNode { /// Creates a new instance of the Optimism node type. pub fn new(args: RollupArgs) -> Self { @@ -102,16 +119,7 @@ impl OpNode { } /// Returns the components for the given [`RollupArgs`]. - pub fn components( - &self, - ) -> ComponentsBuilder< - Node, - OpPoolBuilder, - BasicPayloadServiceBuilder, - OpNetworkBuilder, - OpExecutorBuilder, - OpConsensusBuilder, - > + pub fn components(&self) -> OpNodeComponentBuilder where Node: FullNodeTypes, { @@ -175,7 +183,7 @@ where N: FullNodeTypes< Types: NodeTypes< Payload = OpEngineTypes, - ChainSpec = OpChainSpec, + ChainSpec: OpHardforks + Hardforks, Primitives = OpPrimitives, Storage = OpStorage, >, @@ -193,6 +201,8 @@ where type AddOns = OpAddOns< NodeAdapter>::Components>, OpEthApiBuilder, + OpEngineValidatorBuilder, + OpEngineApiBuilder, >; fn components_builder(&self) -> Self::ComponentsBuilder { @@ -205,6 +215,7 @@ where .with_sequencer_headers(self.args.sequencer_headers.clone()) .with_da_config(self.da_config.clone()) .with_enable_tx_conditional(self.args.enable_tx_conditional) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) .build() } } @@ -216,14 +227,7 @@ where type RpcBlock = alloy_rpc_types_eth::Block; fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy { - let alloy_rpc_types_eth::Block { header, transactions, .. } = rpc_block; - reth_optimism_primitives::OpBlock { - header: header.inner, - body: reth_optimism_primitives::OpBlockBody { - transactions: transactions.into_transactions().collect(), - ..Default::default() - }, - } + rpc_block.into_consensus() } } @@ -237,12 +241,7 @@ impl NodeTypes for OpNode { /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OpAddOns< - N: FullNodeComponents, - EthB: EthApiBuilder, - EV = OpEngineValidatorBuilder, - EB = OpEngineApiBuilder, -> { +pub struct OpAddOns, EV, EB> { /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers /// and eth-api. pub rpc_add_ons: RpcAddOns, @@ -255,9 +254,16 @@ pub struct OpAddOns< pub sequencer_headers: Vec, /// Enable transaction conditionals. 
enable_tx_conditional: bool, + min_suggested_priority_fee: u64, } -impl Default for OpAddOns> +impl Default + for OpAddOns< + N, + OpEthApiBuilder, + OpEngineValidatorBuilder, + OpEngineApiBuilder, + > where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, @@ -267,7 +273,13 @@ where } } -impl OpAddOns> +impl + OpAddOns< + N, + OpEthApiBuilder, + OpEngineValidatorBuilder, + OpEngineApiBuilder, + > where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, @@ -278,14 +290,80 @@ where } } -impl NodeAddOns for OpAddOns> +impl OpAddOns +where + N: FullNodeComponents, + EthB: EthApiBuilder, +{ + /// Maps the [`reth_node_builder::rpc::EngineApiBuilder`] builder type. + pub fn with_engine_api(self, engine_api_builder: T) -> OpAddOns { + let Self { + rpc_add_ons, + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + } = self; + OpAddOns { + rpc_add_ons: rpc_add_ons.with_engine_api(engine_api_builder), + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + } + } + + /// Maps the [`EngineValidatorBuilder`] builder type. + pub fn with_engine_validator(self, engine_validator_builder: T) -> OpAddOns { + let Self { + rpc_add_ons, + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + } = self; + OpAddOns { + rpc_add_ons: rpc_add_ons.with_engine_validator(engine_validator_builder), + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + } + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, N, EthB::EthApi>, RethRpcServerHandles) -> eyre::Result<()> + + Send + + 'static, + { + self.rpc_add_ons = self.rpc_add_ons.on_rpc_started(hook); + self + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, N, EthB::EthApi>) -> eyre::Result<()> + Send + 'static, + { + self.rpc_add_ons = self.rpc_add_ons.extend_rpc_modules(hook); + self + } +} + +impl NodeAddOns for OpAddOns, EV, EB> where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = OpChainSpec, - Primitives = OpPrimitives, + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, Storage = OpStorage, - Payload = OpEngineTypes, + Payload: EngineTypes, >, Evm: ConfigureEvm, >, @@ -294,6 +372,8 @@ where EvmFactoryFor: EvmFactory>, OpEthApi: FullEthApiServer, NetworkT: op_alloy_network::Network + Unpin, + EV: EngineValidatorBuilder, + EB: EngineApiBuilder, { type Handle = RpcHandle>; @@ -307,6 +387,7 @@ where sequencer_url, sequencer_headers, enable_tx_conditional, + .. 
} = self; let builder = reth_optimism_payload_builder::OpPayloadBuilder::new( @@ -335,7 +416,10 @@ where ); rpc_add_ons - .launch_add_ons_with(ctx, move |modules, auth_modules, registry| { + .launch_add_ons_with(ctx, move |container| { + let reth_node_builder::rpc::RpcModuleContainer { modules, auth_module, registry } = + container; + debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint"); modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; @@ -348,13 +432,13 @@ where // install the miner extension in the authenticated if configured if modules.module_config().contains_any(&RethRpcModule::Miner) { debug!(target: "reth::cli", "Installing miner DA rpc endpoint"); - auth_modules.merge_auth_methods(miner_ext.into_rpc())?; + auth_module.merge_auth_methods(miner_ext.into_rpc())?; } // install the debug namespace in the authenticated if configured if modules.module_config().contains_any(&RethRpcModule::Debug) { debug!(target: "reth::cli", "Installing debug rpc endpoint"); - auth_modules.merge_auth_methods(registry.debug_api().into_rpc())?; + auth_module.merge_auth_methods(registry.debug_api().into_rpc())?; } if enable_tx_conditional { @@ -371,14 +455,14 @@ where } } -impl RethRpcAddOns for OpAddOns> +impl RethRpcAddOns for OpAddOns, EV, EB> where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = OpChainSpec, + ChainSpec: OpHardforks, Primitives = OpPrimitives, Storage = OpStorage, - Payload = OpEngineTypes, + Payload: EngineTypes, >, Evm: ConfigureEvm, >, @@ -387,6 +471,8 @@ where EvmFactoryFor: EvmFactory>, OpEthApi: FullEthApiServer, NetworkT: op_alloy_network::Network + Unpin, + EV: EngineValidatorBuilder, + EB: EngineApiBuilder, { type EthApi = OpEthApi; @@ -395,24 +481,23 @@ where } } -impl EngineValidatorAddOn for OpAddOns> +impl EngineValidatorAddOn for OpAddOns, EV, EB> where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = OpChainSpec, + ChainSpec: OpHardforks, Primitives = OpPrimitives, - Payload = OpEngineTypes, + Payload: EngineTypes, >, >, OpEthApiBuilder: EthApiBuilder, + EV: EngineValidatorBuilder + Default, + EB: EngineApiBuilder, { - type Validator = OpEngineValidator< - N::Provider, - <::Primitives as NodePrimitives>::SignedTx, - >; + type Validator = >::Validator; async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - OpEngineValidatorBuilder::default().build(ctx).await + EV::default().build(ctx).await } } @@ -431,6 +516,8 @@ pub struct OpAddOnsBuilder { enable_tx_conditional: bool, /// Marker for network types. _nt: PhantomData, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, } impl Default for OpAddOnsBuilder { @@ -440,6 +527,7 @@ impl Default for OpAddOnsBuilder { sequencer_headers: Vec::new(), da_config: None, enable_tx_conditional: false, + min_suggested_priority_fee: 1_000_000, _nt: PhantomData, } } @@ -469,63 +557,61 @@ impl OpAddOnsBuilder { self.enable_tx_conditional = enable_tx_conditional; self } + + /// Configure the minimum priority fee (tip) + pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { + self.min_suggested_priority_fee = min; + self + } } impl OpAddOnsBuilder { /// Builds an instance of [`OpAddOns`]. - pub fn build(self) -> OpAddOns> + pub fn build(self) -> OpAddOns, EV, EB> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, + EV: Default, + EB: Default, { - let Self { sequencer_url, sequencer_headers, da_config, enable_tx_conditional, .. 
} = self; + let Self { + sequencer_url, + sequencer_headers, + da_config, + enable_tx_conditional, + min_suggested_priority_fee, + .. + } = self; OpAddOns { rpc_add_ons: RpcAddOns::new( OpEthApiBuilder::default() .with_sequencer(sequencer_url.clone()) - .with_sequencer_headers(sequencer_headers.clone()), - OpEngineValidatorBuilder::default(), - OpEngineApiBuilder::default(), + .with_sequencer_headers(sequencer_headers.clone()) + .with_min_suggested_priority_fee(min_suggested_priority_fee), + EV::default(), + EB::default(), ), da_config: da_config.unwrap_or_default(), sequencer_url, sequencer_headers, enable_tx_conditional, + min_suggested_priority_fee, } } } /// A regular optimism evm and executor builder. -#[derive(Debug, Copy)] +#[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] -pub struct OpExecutorBuilder { - /// Marker for chain spec type. - _cs: PhantomData, - /// Marker for primitives type. - _p: PhantomData, -} - -impl Clone for OpExecutorBuilder { - fn clone(&self) -> Self { - Self::default() - } -} - -impl Default for OpExecutorBuilder { - fn default() -> Self { - Self { _cs: PhantomData, _p: PhantomData } - } -} +pub struct OpExecutorBuilder; -impl ExecutorBuilder for OpExecutorBuilder +impl ExecutorBuilder for OpExecutorBuilder where - Node: FullNodeTypes>, - ChainSpec: EthChainSpec + OpHardforks, - Primitives: NodePrimitives, - OpEvmConfig: ConfigureEvm + 'static, + Node: FullNodeTypes>, { - type EVM = OpEvmConfig; + type EVM = + OpEvmConfig<::ChainSpec, ::Primitives>; async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result { let evm_config = OpEvmConfig::new(ctx.chain_spec(), OpRethReceiptBuilder::default()); @@ -613,8 +699,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let Self { pool_config_overrides, .. 
} = self; - let data_dir = ctx.config().datadir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; + // supervisor used for interop if ctx.chain_spec().is_interop_active_at_timestamp(ctx.head().timestamp) && self.supervisor_http == DEFAULT_SUPERVISOR_URL @@ -629,6 +714,7 @@ where .build() .await; + let blob_store = reth_node_builder::components::create_blob_store(ctx)?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .no_eip4844() .with_head_timestamp(ctx.head().timestamp) @@ -648,87 +734,43 @@ where .with_supervisor(supervisor_client.clone()) }); - let transaction_pool = reth_transaction_pool::Pool::new( - validator, - CoinbaseTipOrdering::default(), - blob_store, - pool_config_overrides.apply(ctx.pool_config()), - ); + let final_pool_config = pool_config_overrides.apply(ctx.pool_config()); + + let transaction_pool = TxPoolBuilder::new(ctx) + .with_validator(validator) + .build_and_spawn_maintenance_task(blob_store, final_pool_config)?; + info!(target: "reth::cli", "Transaction pool initialized"); + debug!(target: "reth::cli", "Spawned txpool maintenance task"); - // spawn txpool maintenance tasks + // The Op txpool maintenance task is only spawned when interop is active + if ctx.chain_spec().is_interop_active_at_timestamp(ctx.head().timestamp) && + self.supervisor_http == DEFAULT_SUPERVISOR_URL { - let pool = transaction_pool.clone(); + // spawn the Op txpool maintenance task let chain_events = ctx.provider().canonical_state_stream(); - let client = ctx.provider().clone(); - if !ctx.config().txpool.disable_transactions_backup { - // Use configured backup path or default to data dir - let transactions_path = ctx - .config() - .txpool - .transactions_backup_path - .clone() - .unwrap_or_else(|| data_dir.txpool_transactions()); - - let transactions_backup_config = - reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path); - - ctx.task_executor().spawn_critical_with_graceful_shutdown_signal( - "local transactions backup task", - |shutdown| { - reth_transaction_pool::maintain::backup_local_transactions_task( - shutdown, - pool.clone(), - transactions_backup_config, - ) - }, - ); - } - - // spawn the main maintenance task ctx.task_executor().spawn_critical( - "txpool maintenance task", - reth_transaction_pool::maintain::maintain_transaction_pool_future( - client, - pool.clone(), + "Op txpool interop maintenance task", + reth_optimism_txpool::maintain::maintain_transaction_pool_interop_future( + transaction_pool.clone(), chain_events, - ctx.task_executor().clone(), - reth_transaction_pool::maintain::MaintainPoolConfig { - max_tx_lifetime: pool.config().max_queued_lifetime, - no_local_exemptions: transaction_pool - .config() - .local_transactions_config - .no_exemptions, - ..Default::default() - }, + supervisor_client, ), ); - debug!(target: "reth::cli", "Spawned txpool maintenance task"); + debug!(target: "reth::cli", "Spawned Op interop txpool maintenance task"); + } + if self.enable_tx_conditional { // spawn the Op txpool maintenance task let chain_events = ctx.provider().canonical_state_stream(); ctx.task_executor().spawn_critical( - "Op txpool interop maintenance task", - reth_optimism_txpool::maintain::maintain_transaction_pool_interop_future( - pool.clone(), + "Op txpool conditional maintenance task", + reth_optimism_txpool::maintain::maintain_transaction_pool_conditional_future( + transaction_pool.clone(), chain_events, - supervisor_client, ), ); - debug!(target: 
"reth::cli", "Spawned Op interop txpool maintenance task"); - - if self.enable_tx_conditional { - // spawn the Op txpool maintenance task - let chain_events = ctx.provider().canonical_state_stream(); - ctx.task_executor().spawn_critical( - "Op txpool conditional maintenance task", - reth_optimism_txpool::maintain::maintain_transaction_pool_conditional_future( - pool, - chain_events, - ), - ); - debug!(target: "reth::cli", "Spawned Op conditional txpool maintenance task"); - } + debug!(target: "reth::cli", "Spawned Op conditional txpool maintenance task"); } Ok(transaction_pool) @@ -781,21 +823,22 @@ impl OpPayloadBuilder { impl PayloadBuilderBuilder for OpPayloadBuilder where Node: FullNodeTypes< + Provider: ChainSpecProvider, Types: NodeTypes< - Payload = OpEngineTypes, - ChainSpec = OpChainSpec, - Primitives = OpPrimitives, + Primitives: OpPayloadPrimitives, + Payload: PayloadTypes< + BuiltPayload = OpBuiltPayload>, + PayloadAttributes = OpPayloadAttributes, + PayloadBuilderAttributes = OpPayloadBuilderAttributes>, + >, >, >, Evm: ConfigureEvm< Primitives = PrimitivesTy, NextBlockEnvCtx = OpNextBlockEnvAttributes, > + 'static, - Pool: TransactionPool>> - + Unpin - + 'static, + Pool: TransactionPool>> + Unpin + 'static, Txs: OpPayloadTransactions, - ::Transaction: OpPooledTx, { type PayloadBuilder = reth_optimism_payload_builder::OpPayloadBuilder; @@ -820,40 +863,37 @@ where /// A basic optimism network builder. #[derive(Debug, Default)] -pub struct OpNetworkBuilder { +pub struct OpNetworkBuilder { /// Disable transaction pool gossip pub disable_txpool_gossip: bool, /// Disable discovery v4 pub disable_discovery_v4: bool, - /// Marker for the network primitives type - _np: PhantomData, - /// Marker for the pooled transaction type - _pt: PhantomData, } -impl Clone for OpNetworkBuilder { +impl Clone for OpNetworkBuilder { fn clone(&self) -> Self { Self::new(self.disable_txpool_gossip, self.disable_discovery_v4) } } -impl OpNetworkBuilder { +impl OpNetworkBuilder { /// Creates a new `OpNetworkBuilder`. pub const fn new(disable_txpool_gossip: bool, disable_discovery_v4: bool) -> Self { - Self { disable_txpool_gossip, disable_discovery_v4, _np: PhantomData, _pt: PhantomData } + Self { disable_txpool_gossip, disable_discovery_v4 } } } -impl OpNetworkBuilder { +impl OpNetworkBuilder { /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. /// /// This applies the configured [`OpNetworkBuilder`] settings. - pub fn network_config( + pub fn network_config( &self, ctx: &BuilderContext, - ) -> eyre::Result::Provider, NetworkP>> + ) -> eyre::Result> where Node: FullNodeTypes>, + NetworkP: NetworkPrimitives, { let Self { disable_txpool_gossip, disable_discovery_v4, .. 
} = self.clone(); let args = &ctx.config().network; @@ -892,22 +932,15 @@ impl OpNetworkBuilder } } -impl NetworkBuilder - for OpNetworkBuilder +impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool< - Transaction: PoolTransaction, Pooled = PooledTx>, - > + Unpin + Pool: TransactionPool>> + + Unpin + 'static, - NetworkP: NetworkPrimitives< - PooledTransaction = PooledTx, - BroadcastedTransaction = <::Primitives as NodePrimitives>::SignedTx - > - + NetPrimitivesFor>, - PooledTx: Send, { - type Network = NetworkHandle; + type Network = + NetworkHandle, PoolPooledTx>>; async fn build_network( self, @@ -951,12 +984,13 @@ pub struct OpEngineValidatorBuilder; impl EngineValidatorBuilder for OpEngineValidatorBuilder where - Types: NodeTypes, + Types: NodeTypes, Node: FullNodeComponents, { type Validator = OpEngineValidator< Node::Provider, <::Primitives as NodePrimitives>::SignedTx, + ::ChainSpec, >; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { @@ -968,15 +1002,4 @@ where } /// Network primitive types used by Optimism networks. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub struct OpNetworkPrimitives; - -impl NetworkPrimitives for OpNetworkPrimitives { - type BlockHeader = alloy_consensus::Header; - type BlockBody = alloy_consensus::BlockBody; - type Block = alloy_consensus::Block; - type BroadcastedTransaction = OpTransactionSigned; - type PooledTransaction = OpPooledTransaction; - type Receipt = OpReceipt; -} +pub type OpNetworkPrimitives = BasicNetworkPrimitives; diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 1126235af0b..56022b5a4d4 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -14,7 +14,7 @@ use reth_payload_builder::PayloadStore; use reth_rpc_engine_api::{EngineApi, EngineCapabilities}; /// Builder for basic [`OpEngineApi`] implementation. -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub struct OpEngineApiBuilder { engine_validator_builder: EV, } diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index a8ab57c7222..9e2f7b5b3b0 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -25,6 +25,7 @@ pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, num_nodes, Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), false, + Default::default(), optimism_payload_attributes, ) .await diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index 6b504052eb3..ff1ee5340a3 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -19,8 +19,8 @@ use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ args::RollupArgs, node::{ - OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpNodeTypes, - OpPayloadBuilder, OpPoolBuilder, + OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpNodeComponentBuilder, + OpNodeTypes, OpPayloadBuilder, OpPoolBuilder, }, txpool::OpPooledTransaction, utils::optimism_payload_attributes, @@ -88,14 +88,7 @@ impl OpPayloadTransactions for CustomTxPriority { /// Builds the node with custom transaction priority service within default payload builder. 
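// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// `build_components` below now returns the new `OpNodeComponentBuilder` alias instead of the
// fully spelled-out five-builder `ComponentsBuilder` stack. A standalone illustration of why
// such an alias helps: a defaulted slot covers the common case, and call sites override only the
// builder they customize. All types here are hypothetical stand-ins.

struct Stack<Pool, Payload, Net>(Pool, Payload, Net);
#[derive(Default)]
struct PoolBuilderSketch;
#[derive(Default)]
struct NetworkBuilderSketch;
#[derive(Default)]
struct DefaultPayloadBuilderSketch;

// Loosely mirrors `OpNodeComponentBuilder` with a defaulted payload-builder slot.
type NodeComponentBuilderSketch<Payload = DefaultPayloadBuilderSketch> =
    Stack<PoolBuilderSketch, Payload, NetworkBuilderSketch>;

struct PriorityPayloadBuilderSketch;

fn build_components_sketch() -> NodeComponentBuilderSketch<PriorityPayloadBuilderSketch> {
    Stack(PoolBuilderSketch, PriorityPayloadBuilderSketch, NetworkBuilderSketch)
}
// -------------------------------------------------------------------------------------------------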
fn build_components( chain_id: ChainId, -) -> ComponentsBuilder< - Node, - OpPoolBuilder, - BasicPayloadServiceBuilder>, - OpNetworkBuilder, - OpExecutorBuilder, - OpConsensusBuilder, -> +) -> OpNodeComponentBuilder> where Node: FullNodeTypes, { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index b6c7eb1d449..d5a3260420d 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -6,13 +6,13 @@ use crate::{ payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, OpPayloadPrimitives, }; -use alloy_consensus::{Transaction, Typed2718}; +use alloy_consensus::{BlockHeader, Transaction, Typed2718}; use alloy_primitives::{Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; +use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ execute::{ @@ -32,7 +32,9 @@ use reth_optimism_txpool::{ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; -use reth_primitives_traits::{NodePrimitives, SealedHeader, SignedTransaction, TxTy}; +use reth_primitives_traits::{ + HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, +}; use reth_revm::{ cancelled::CancelOnDrop, database::StateProviderDatabase, db::State, witness::ExecutionWitnessRecord, @@ -126,7 +128,7 @@ impl OpPayloadBuilder { impl OpPayloadBuilder where Pool: TransactionPool>, - Client: StateProviderFactory + ChainSpecProvider, + Client: StateProviderFactory + ChainSpecProvider, N: OpPayloadPrimitives, Evm: ConfigureEvm, { @@ -175,7 +177,7 @@ where /// Computes the witness for the payload. pub fn payload_witness( &self, - parent: SealedHeader, + parent: SealedHeader, attributes: OpPayloadAttributes, ) -> Result { let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) @@ -201,8 +203,8 @@ where /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. impl PayloadBuilder for OpPayloadBuilder where - Client: StateProviderFactory + ChainSpecProvider + Clone, N: OpPayloadPrimitives, + Client: StateProviderFactory + ChainSpecProvider + Clone, Pool: TransactionPool>, Evm: ConfigureEvm, Txs: OpPayloadTransactions, @@ -231,7 +233,7 @@ where // system txs, hence on_missing_payload we return [MissingPayloadBehaviour::AwaitInProgress]. 
fn build_empty_payload( &self, - config: PayloadConfig, + config: PayloadConfig, ) -> Result { let args = BuildArguments { config, @@ -290,7 +292,7 @@ impl OpBuilder<'_, Txs> { PayloadTransactions + OpPooledTx>, { let Self { best } = self; - debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload"); let mut db = State::builder().with_database(db).with_bundle_update().build(); @@ -328,7 +330,7 @@ impl OpBuilder<'_, Txs> { let execution_outcome = ExecutionOutcome::new( db.take_bundle(), vec![execution_result.receipts], - block.number, + block.number(), Vec::new(), ); @@ -339,7 +341,7 @@ impl OpBuilder<'_, Txs> { execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), }, - trie: Arc::new(trie_updates), + trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), }; let no_tx_pool = ctx.attributes().no_tx_pool; @@ -486,7 +488,8 @@ pub struct OpPayloadBuilderCtx { /// The chainspec pub chain_spec: Arc, /// How to build the payload. - pub config: PayloadConfig>>, + pub config: + PayloadConfig>, HeaderTy>, /// Marker to check whether the job has been cancelled. pub cancel: CancelOnDrop, /// The currently best payload. @@ -499,9 +502,8 @@ where ChainSpec: EthChainSpec + OpHardforks, { /// Returns the parent block the payload will be build on. - #[allow(clippy::missing_const_for_fn)] - pub fn parent(&self) -> &SealedHeader { - &self.config.parent_header + pub fn parent(&self) -> &SealedHeaderFor { + self.config.parent_header.as_ref() } /// Returns the builder attributes. @@ -562,7 +564,10 @@ where timestamp: self.attributes().timestamp(), suggested_fee_recipient: self.attributes().suggested_fee_recipient(), prev_randao: self.attributes().prev_randao(), - gas_limit: self.attributes().gas_limit.unwrap_or(self.parent().gas_limit), + gas_limit: self + .attributes() + .gas_limit + .unwrap_or_else(|| self.parent().gas_limit()), parent_beacon_block_root: self.attributes().parent_beacon_block_root(), extra_data: self.extra_data()?, }, diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 038e7cab833..03545863e81 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -18,9 +18,9 @@ pub mod payload; use op_alloy_rpc_types_engine::OpExecutionData; pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; mod traits; -use reth_optimism_primitives::{OpBlock, OpPrimitives}; +use reth_optimism_primitives::OpPrimitives; use reth_payload_primitives::{BuiltPayload, PayloadTypes}; -use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::{Block, NodePrimitives, SealedBlock}; pub use traits::*; pub mod validator; pub use validator::OpExecutionPayloadValidator; @@ -34,7 +34,7 @@ pub struct OpPayloadTypes(core::marker::Phanto impl PayloadTypes for OpPayloadTypes where - OpBuiltPayload: BuiltPayload>, + OpBuiltPayload: BuiltPayload, { type ExecutionData = OpExecutionData; type BuiltPayload = OpBuiltPayload; @@ -46,6 +46,9 @@ where <::Primitives as NodePrimitives>::Block, >, ) -> Self::ExecutionData { - OpExecutionData::from_block_unchecked(block.hash(), &block.into_block()) + OpExecutionData::from_block_unchecked( + block.hash(), + &block.into_block().into_ethereum_block(), + ) } } diff --git 
a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index ebdb2b9a762..f32f19ff6f9 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -17,13 +17,13 @@ use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_optimism_primitives::OpPrimitives; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives_traits::{NodePrimitives, SealedBlock, SignedTransaction, WithEncoded}; /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_optimism_primitives::OpPrimitives; /// Optimism Payload Builder Attributes #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/crates/optimism/payload/src/traits.rs b/crates/optimism/payload/src/traits.rs index a0d13022cd5..6ca07e86e3f 100644 --- a/crates/optimism/payload/src/traits.rs +++ b/crates/optimism/payload/src/traits.rs @@ -1,29 +1,33 @@ -use alloy_consensus::{BlockBody, Header}; +use alloy_consensus::BlockBody; use reth_optimism_primitives::{transaction::OpTransaction, DepositReceipt}; -use reth_primitives_traits::{NodePrimitives, SignedTransaction}; +use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction}; /// Helper trait to encapsulate common bounds on [`NodePrimitives`] for OP payload builder. pub trait OpPayloadPrimitives: NodePrimitives< Receipt: DepositReceipt, SignedTx = Self::_TX, - BlockHeader = Header, - BlockBody = BlockBody, + BlockBody = BlockBody, + BlockHeader = Self::_Header, > { /// Helper AT to bound [`NodePrimitives::Block`] type without causing bound cycle. type _TX: SignedTransaction + OpTransaction; + /// Helper AT to bound [`NodePrimitives::Block`] type without causing bound cycle. + type _Header: FullBlockHeader; } -impl OpPayloadPrimitives for T +impl OpPayloadPrimitives for T where Tx: SignedTransaction + OpTransaction, T: NodePrimitives< SignedTx = Tx, Receipt: DepositReceipt, + BlockBody = BlockBody, BlockHeader = Header, - BlockBody = BlockBody, >, + Header: FullBlockHeader, { type _TX = Tx; + type _Header = Header; } diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index 5a49238a1f2..de9a777cb1d 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -381,8 +381,11 @@ impl reth_primitives_traits::Receipt for OpReceipt {} /// Trait for deposit receipt. pub trait DepositReceipt: reth_primitives_traits::Receipt { - /// Returns deposit receipt if it is a deposit transaction. + /// Converts a `Receipt` into a mutable Optimism deposit receipt. fn as_deposit_receipt_mut(&mut self) -> Option<&mut OpDepositReceipt>; + + /// Extracts an Optimism deposit receipt from `Receipt`. 
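// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// The shared accessor declared just below complements the existing `&mut` variant, so read-only
// callers (e.g. RPC fill paths) no longer need mutable access to the receipt. Standalone sketch
// of the pair on a stand-in receipt enum:

struct DepositDataSketch {
    deposit_nonce: Option<u64>,
}

enum ReceiptSketch {
    Deposit(DepositDataSketch),
    Legacy,
}

impl ReceiptSketch {
    fn as_deposit_receipt(&self) -> Option<&DepositDataSketch> {
        match self {
            Self::Deposit(r) => Some(r),
            Self::Legacy => None,
        }
    }
}

// Read-only inspection, as used when filling deposit metadata into RPC responses:
fn deposit_nonce(receipt: &ReceiptSketch) -> Option<u64> {
    receipt.as_deposit_receipt().and_then(|r| r.deposit_nonce)
}
// -------------------------------------------------------------------------------------------------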
+ fn as_deposit_receipt(&self) -> Option<&OpDepositReceipt>; } impl DepositReceipt for OpReceipt { @@ -392,6 +395,13 @@ impl DepositReceipt for OpReceipt { _ => None, } } + + fn as_deposit_receipt(&self) -> Option<&OpDepositReceipt> { + match self { + Self::Deposit(receipt) => Some(receipt), + _ => None, + } + } } #[cfg(feature = "reth-codec")] diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 230d8a6a3ea..d24acaa08b7 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -7,6 +7,7 @@ mod tx_type; mod signed; pub use op_alloy_consensus::{OpTxType, OpTypedTransaction}; +use reth_primitives_traits::Extended; /// Signed transaction. pub type OpTransactionSigned = op_alloy_consensus::OpTxEnvelope; @@ -23,3 +24,16 @@ impl OpTransaction for op_alloy_consensus::OpTxEnvelope { Self::is_deposit(self) } } + +impl OpTransaction for Extended +where + B: OpTransaction, + T: OpTransaction, +{ + fn is_deposit(&self) -> bool { + match self { + Self::BuiltIn(b) => b.is_deposit(), + Self::Other(t) => t.is_deposit(), + } + } +} diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index abd7cb7048a..f00b52acbe9 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -35,6 +35,7 @@ reth-transaction-pool = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } +reth-cli-util = { workspace = true, optional = true } # reth-op reth-optimism-primitives.workspace = true @@ -88,7 +89,7 @@ test-utils = [ full = ["consensus", "evm", "node", "provider", "rpc", "trie", "pool", "network"] alloy-compat = ["reth-optimism-primitives/alloy-compat"] -cli = ["dep:reth-optimism-cli"] +cli = ["dep:reth-optimism-cli", "dep:reth-cli-util"] consensus = [ "dep:reth-consensus", "dep:reth-consensus-common", diff --git a/crates/optimism/reth/src/lib.rs b/crates/optimism/reth/src/lib.rs index f4a2af0d321..abafb72c66c 100644 --- a/crates/optimism/reth/src/lib.rs +++ b/crates/optimism/reth/src/lib.rs @@ -22,7 +22,12 @@ pub mod primitives { /// Re-exported cli types #[cfg(feature = "cli")] -pub use reth_optimism_cli as cli; +pub mod cli { + #[doc(inline)] + pub use reth_cli_util::*; + #[doc(inline)] + pub use reth_optimism_cli::*; +} /// Re-exported pool types #[cfg(feature = "pool")] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 40ca8e6aaf2..1187076f5d3 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -31,7 +31,6 @@ reth-chainspec.workspace = true reth-rpc-engine-api.workspace = true # op-reth -reth-optimism-chainspec.workspace = true reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-txpool.workspace = true @@ -75,6 +74,10 @@ thiserror.workspace = true tracing.workspace = true derive_more = { workspace = true, features = ["constructor"] } +# metrics +reth-metrics.workspace = true +metrics.workspace = true + [dev-dependencies] reth-optimism-chainspec.workspace = true diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 99f4e4ff4b3..134de276f92 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -5,8 +5,9 @@ use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use alloy_transport::{RpcError, TransportErrorKind}; use 
jsonrpsee_types::error::{INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE}; use op_revm::{OpHaltReason, OpTransactionError}; +use reth_evm::execute::ProviderError; use reth_optimism_evm::OpBlockExecutionError; -use reth_rpc_eth_api::AsEthApiError; +use reth_rpc_eth_api::{AsEthApiError, TransactionConversionError}; use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; use revm::context_interface::result::{EVMError, InvalidTransaction}; @@ -160,12 +161,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> } } -impl From for OpEthApiError { - fn from(error: BlockError) -> Self { - Self::Eth(error.into()) - } -} - impl From> for OpEthApiError where T: Into, @@ -193,3 +188,21 @@ impl FromEvmHalt for OpEthApiError { } } } + +impl From for OpEthApiError { + fn from(value: TransactionConversionError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: ProviderError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: BlockError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 67211e9d531..12f3c168d3f 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -5,7 +5,7 @@ use alloy_rpc_types_eth::BlockId; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; use reth_node_api::BlockBody; -use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, @@ -24,7 +24,7 @@ where NetworkTypes: RpcTypes, Provider: BlockReader, >, - N: OpNodeCore + HeaderProvider>, + N: OpNodeCore + HeaderProvider>, { async fn block_receipts( &self, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index aadbc8d9ac2..e9d2efe04f1 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,6 +8,7 @@ mod block; mod call; mod pending_block; +use crate::{eth::transaction::OpTxInfoMapper, OpEthApiError, SequencerClient}; use alloy_primitives::U256; use eyre::WrapErr; use op_alloy_network::Optimism; @@ -24,7 +25,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FromEvmError, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, + EthApiTypes, FromEvmError, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_storage_api::{ @@ -36,9 +37,7 @@ use reth_tasks::{ TaskSpawner, }; use reth_transaction_pool::TransactionPool; -use std::{fmt, marker::PhantomData, sync::Arc}; - -use crate::{OpEthApiError, SequencerClient}; +use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< @@ -68,12 +67,23 @@ pub struct OpEthApi { inner: Arc>, /// Marker for the network types. _nt: PhantomData, + tx_resp_builder: RpcConverter>, } impl OpEthApi { /// Creates a new `OpEthApi`. 
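// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// The reworked constructor below wraps the inner state in an `Arc` once and hands a clone to the
// `OpTxInfoMapper`-driven `RpcConverter`, so the API handle and the tx-response converter share
// one backend. A minimal standalone version of that wiring with stand-in types:

use std::sync::Arc;

struct InnerSketch {
    min_suggested_priority_fee: u128,
}

struct MapperSketch(Arc<InnerSketch>);

struct ApiSketch {
    inner: Arc<InnerSketch>,
    tx_resp_builder: MapperSketch,
}

impl ApiSketch {
    fn new(min_suggested_priority_fee: u128) -> Self {
        let inner = Arc::new(InnerSketch { min_suggested_priority_fee });
        // Both handles point at the same allocation; no state is duplicated.
        Self { tx_resp_builder: MapperSketch(inner.clone()), inner }
    }
}
// -------------------------------------------------------------------------------------------------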
- pub fn new(eth_api: EthApiNodeBackend, sequencer_client: Option) -> Self { - Self { inner: Arc::new(OpEthApiInner { eth_api, sequencer_client }), _nt: PhantomData } + pub fn new( + eth_api: EthApiNodeBackend, + sequencer_client: Option, + min_suggested_priority_fee: U256, + ) -> Self { + let inner = + Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee }); + Self { + inner: inner.clone(), + _nt: PhantomData, + tx_resp_builder: RpcConverter::with_mapper(OpTxInfoMapper::new(inner)), + } } } @@ -84,13 +94,11 @@ where >, { /// Returns a reference to the [`EthApiNodeBackend`]. - #[allow(clippy::missing_const_for_fn)] pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } /// Returns the configured sequencer client, if any. - #[allow(clippy::missing_const_for_fn)] pub fn sequencer_client(&self) -> Option<&SequencerClient> { self.inner.sequencer_client() } @@ -103,16 +111,18 @@ where impl EthApiTypes for OpEthApi where - Self: Send + Sync + std::fmt::Debug, + Self: Send + Sync + fmt::Debug, N: OpNodeCore, - NetworkT: op_alloy_network::Network + Clone + std::fmt::Debug, + NetworkT: op_alloy_network::Network + Clone + fmt::Debug, + ::Primitives: fmt::Debug, { type Error = OpEthApiError; type NetworkTypes = NetworkT; - type TransactionCompat = Self; + type TransactionCompat = + RpcConverter>; fn tx_resp_builder(&self) -> &Self::TransactionCompat { - self + &self.tx_resp_builder } } @@ -193,6 +203,7 @@ where Self: Send + Sync + Clone + 'static, N: OpNodeCore, NetworkT: op_alloy_network::Network, + ::Primitives: fmt::Debug, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -228,6 +239,12 @@ where fn fee_history_cache(&self) -> &FeeHistoryCache { self.inner.eth_api.fee_history_cache() } + + async fn suggested_priority_fee(&self) -> Result { + let base_tip = self.inner.eth_api.gas_oracle().suggest_tip_cap().await?; + let min_tip = U256::from(self.inner.min_suggested_priority_fee); + Ok(base_tip.max(min_tip)) + } } impl LoadState for OpEthApi @@ -237,6 +254,7 @@ where Pool: TransactionPool, >, NetworkT: op_alloy_network::Network, + ::Primitives: fmt::Debug, { } @@ -290,12 +308,22 @@ impl fmt::Debug for OpEthApi { } /// Container type `OpEthApi` -struct OpEthApiInner { +pub struct OpEthApiInner { /// Gateway to node's core components. eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_client: Option, + /// Minimum priority fee enforced by OP-specific logic. + /// + /// See also + min_suggested_priority_fee: U256, +} + +impl fmt::Debug for OpEthApiInner { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpEthApiInner").finish() + } } impl OpEthApiInner { @@ -318,20 +346,32 @@ pub struct OpEthApiBuilder { sequencer_url: Option, /// Headers to use for the sequencer client requests. sequencer_headers: Vec, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, /// Marker for network types. _nt: PhantomData, } impl Default for OpEthApiBuilder { fn default() -> Self { - Self { sequencer_url: None, sequencer_headers: Vec::new(), _nt: PhantomData } + Self { + sequencer_url: None, + sequencer_headers: Vec::new(), + min_suggested_priority_fee: 1_000_000, + _nt: PhantomData, + } } } impl OpEthApiBuilder { /// Creates a [`OpEthApiBuilder`] instance from core components. 
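// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// The `suggested_priority_fee` override above clamps the gas-price oracle's tip suggestion from
// below: `eth_maxPriorityFeePerGas` never returns less than the configured floor (1_000_000 wei
// by default, per the builders in this diff). The logic, runnable in isolation:

fn suggested_priority_fee(oracle_tip: u128, min_suggested: u128) -> u128 {
    oracle_tip.max(min_suggested)
}

fn demo_suggested_priority_fee() {
    assert_eq!(suggested_priority_fee(250_000, 1_000_000), 1_000_000); // quiet chain: floor wins
    assert_eq!(suggested_priority_fee(3_000_000, 1_000_000), 3_000_000); // busy chain: oracle wins
}
// -------------------------------------------------------------------------------------------------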
pub const fn new() -> Self { - Self { sequencer_url: None, sequencer_headers: Vec::new(), _nt: PhantomData } + Self { + sequencer_url: None, + sequencer_headers: Vec::new(), + min_suggested_priority_fee: 1_000_000, + _nt: PhantomData, + } } /// With a [`SequencerClient`]. @@ -345,6 +385,12 @@ impl OpEthApiBuilder { self.sequencer_headers = sequencer_headers; self } + + /// With minimum suggested priority fee (tip) + pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { + self.min_suggested_priority_fee = min; + self + } } impl EthApiBuilder for OpEthApiBuilder @@ -356,7 +402,7 @@ where type EthApi = OpEthApi; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let Self { sequencer_url, sequencer_headers, .. } = self; + let Self { sequencer_url, sequencer_headers, min_suggested_priority_fee, .. } = self; let eth_api = reth_rpc::EthApiBuilder::new( ctx.components.provider().clone(), ctx.components.pool().clone(), @@ -383,6 +429,6 @@ where None }; - Ok(OpEthApi::new(eth_api, sequencer_client)) + Ok(OpEthApi::new(eth_api, sequencer_client, U256::from(min_suggested_priority_fee))) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index b0c13f14b1f..684207fde8a 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -42,14 +42,15 @@ where + StateProviderFactory, Pool: TransactionPool>>, Evm: ConfigureEvm< - Primitives: NodePrimitives< - SignedTx = ProviderTx, - BlockHeader = ProviderHeader, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, + Primitives = ::Primitives, NextBlockEnvCtx = OpNextBlockEnvAttributes, >, + Primitives: NodePrimitives< + BlockHeader = ProviderHeader, + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + Block = ProviderBlock, + >, >, { #[inline] @@ -64,7 +65,7 @@ where fn next_env_attributes( &self, parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { + ) -> Result<::NextBlockEnvCtx, Self::Error> { Ok(OpNextBlockEnvAttributes { timestamp: parent.timestamp().saturating_add(12), suggested_fee_recipient: parent.beneficiary(), diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index fb08a3c0756..92bd6fb1957 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -7,7 +7,6 @@ use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptE use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_chainspec::ChainSpecProvider; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; @@ -20,7 +19,7 @@ use crate::{OpEthApi, OpEthApiError}; impl LoadReceipt for OpEthApi where Self: Send + Sync, - N: FullNodeComponents>, + N: FullNodeComponents>, Self::Provider: TransactionsProvider + ReceiptProvider, { @@ -115,7 +114,7 @@ impl OpReceiptFieldsBuilder { /// Applies [`L1BlockInfo`](op_revm::L1BlockInfo). pub fn l1_block_info( mut self, - chain_spec: &OpChainSpec, + chain_spec: &impl OpHardforks, tx: &OpTransactionSigned, l1_block_info: &mut op_revm::L1BlockInfo, ) -> Result { @@ -223,7 +222,7 @@ pub struct OpReceiptBuilder { impl OpReceiptBuilder { /// Returns a new builder. 
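// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// `new` below, like `l1_block_info` above, now takes `&impl OpHardforks` instead of the concrete
// `&OpChainSpec`, so any spec type implementing the hardfork queries works; the tests accordingly
// pass `&*OP_MAINNET` to deref the lazy static before coercion. Stand-in illustration:

trait HardforksSketch {
    fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool;
}

struct FixedActivation(u64);

impl HardforksSketch for FixedActivation {
    fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool {
        timestamp >= self.0
    }
}

// Accepts anything implementing the trait, not one concrete chain-spec struct.
fn uses_fjord_pricing(spec: &impl HardforksSketch, timestamp: u64) -> bool {
    spec.is_fjord_active_at_timestamp(timestamp)
}
// -------------------------------------------------------------------------------------------------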
pub fn new( - chain_spec: &OpChainSpec, + chain_spec: &impl OpHardforks, transaction: &OpTransactionSigned, meta: TransactionMeta, receipt: &OpReceipt, @@ -341,7 +340,7 @@ mod test { assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) - .l1_block_info(&OP_MAINNET, &tx_1, &mut l1_block_info) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); @@ -412,7 +411,7 @@ mod test { l1_block_info.operator_fee_constant = Some(U256::from(2)); let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) - .l1_block_info(&OP_MAINNET, &tx_1, &mut l1_block_info) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); @@ -435,7 +434,7 @@ mod test { l1_block_info.operator_fee_constant = Some(U256::ZERO); let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) - .l1_block_info(&OP_MAINNET, &tx_1, &mut l1_block_info) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); @@ -469,7 +468,7 @@ mod test { let tx_1 = OpTransactionSigned::decode_2718(&mut &tx[..]).unwrap(); let receipt_meta = OpReceiptFieldsBuilder::new(1730216981, 21713817) - .l1_block_info(&BASE_MAINNET, &tx_1, &mut l1_block_info) + .l1_block_info(&*BASE_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 30123166fcd..30422316ad9 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,26 +1,29 @@ //! Loads and formats OP transaction RPC response. 
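// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// The rewrite of this module below replaces the hand-rolled `TransactionCompat` impl with a small
// `TxInfoMapper` that only enriches generic transaction info with OP deposit metadata; the shared
// `RpcConverter` then drives the rest of the response building. Standalone sketch of the mapper
// shape with stand-in types:

struct TxInfoSketch;

struct OpTxInfoSketch {
    _inner: TxInfoSketch,
    _deposit_nonce: Option<u64>,
}

trait TxInfoMapperSketch<Tx> {
    type Out;
    type Err;
    fn try_map(&self, tx: &Tx, info: TxInfoSketch) -> Result<Self::Out, Self::Err>;
}

struct TxSketch {
    is_deposit: bool,
}

struct DepositAwareMapper;

impl TxInfoMapperSketch<TxSketch> for DepositAwareMapper {
    type Out = OpTxInfoSketch;
    type Err = &'static str;

    fn try_map(&self, tx: &TxSketch, info: TxInfoSketch) -> Result<OpTxInfoSketch, Self::Err> {
        // The real impl fetches the deposit receipt for nonce/version; stubbed here.
        let deposit_nonce = tx.is_deposit.then_some(0u64);
        Ok(OpTxInfoSketch { _inner: info, _deposit_nonce: deposit_nonce })
    }
}
// -------------------------------------------------------------------------------------------------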
-use alloy_consensus::{transaction::Recovered, SignableTransaction}; -use alloy_primitives::{Bytes, Signature, B256}; -use alloy_rpc_types_eth::TransactionInfo; -use op_alloy_consensus::{ - transaction::{OpDepositInfo, OpTransactionInfo}, - OpTxEnvelope, +use crate::{ + eth::{OpEthApiInner, OpNodeCore}, + OpEthApi, OpEthApiError, SequencerClient, }; -use op_alloy_rpc_types::{OpTransactionRequest, Transaction}; +use alloy_primitives::{Bytes, B256}; +use alloy_rpc_types_eth::TransactionInfo; +use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; use reth_node_api::FullNodeComponents; -use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; +use reth_optimism_primitives::DepositReceipt; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, + try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, + RpcNodeCoreExt, TxInfoMapper, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, + errors::ProviderError, BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, + TransactionsProvider, }; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; - -use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; +use std::{ + fmt::{Debug, Formatter}, + sync::Arc, +}; impl EthTransactions for OpEthApi where @@ -86,58 +89,39 @@ where } } -impl TransactionCompat for OpEthApi -where - N: FullNodeComponents>, -{ - type Transaction = Transaction; - type Error = OpEthApiError; - - fn fill( - &self, - tx: Recovered, - tx_info: TransactionInfo, - ) -> Result { - let tx = tx.convert::(); - let mut deposit_receipt_version = None; - let mut deposit_nonce = None; - - if tx.is_deposit() { - // for depost tx we need to fetch the receipt - self.inner - .eth_api - .provider() - .receipt_by_hash(tx.tx_hash()) - .map_err(Self::Error::from_eth_err)? - .inspect(|receipt| { - if let OpReceipt::Deposit(receipt) = receipt { - deposit_receipt_version = receipt.deposit_receipt_version; - deposit_nonce = receipt.deposit_nonce; - } - }); - } - let deposit_meta = OpDepositInfo { deposit_nonce, deposit_receipt_version }; - let op_tx_info = OpTransactionInfo::new(tx_info, deposit_meta); +/// Optimism implementation of [`TxInfoMapper`]. +/// +/// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`. +/// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`]. +#[derive(Clone)] +pub struct OpTxInfoMapper(Arc>); - Ok(Transaction::from_transaction(tx, op_tx_info)) +impl Debug for OpTxInfoMapper { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpTxInfoMapper").finish() } +} - fn build_simulate_v1_transaction( - &self, - request: alloy_rpc_types_eth::TransactionRequest, - ) -> Result { - let request: OpTransactionRequest = request.into(); - let Ok(tx) = request.build_typed_tx() else { - return Err(OpEthApiError::Eth(EthApiError::TransactionConversionError)) - }; - - // Create an empty signature for the transaction. 
- let signature = Signature::new(Default::default(), Default::default(), false); - Ok(tx.into_signed(signature).into()) +impl OpTxInfoMapper { + /// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. + pub const fn new(eth_api: Arc>) -> Self { + Self(eth_api) } +} - fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - let input = tx.inner.inner.inner_mut().input_mut(); - *input = input.slice(..4); +impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper +where + N: FullNodeComponents, + N::Provider: ReceiptProvider, +{ + type Out = OpTransactionInfo; + type Err = ProviderError; + + fn try_map( + &self, + tx: &OpTxEnvelope, + tx_info: TransactionInfo, + ) -> Result { + try_into_op_tx_info(self.0.eth_api.provider(), tx, tx_info) } } diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs index f18b815f255..a4de556ea13 100644 --- a/crates/optimism/rpc/src/miner.rs +++ b/crates/optimism/rpc/src/miner.rs @@ -3,6 +3,7 @@ use alloy_primitives::U64; use jsonrpsee_core::{async_trait, RpcResult}; pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; +use reth_metrics::{metrics::Gauge, Metrics}; use reth_optimism_payload_builder::config::OpDAConfig; use tracing::debug; @@ -11,13 +12,14 @@ use tracing::debug; #[derive(Debug, Clone)] pub struct OpMinerExtApi { da_config: OpDAConfig, + metrics: OpMinerMetrics, } impl OpMinerExtApi { /// Instantiate the miner API extension with the given, sharable data availability /// configuration. - pub const fn new(da_config: OpDAConfig) -> Self { - Self { da_config } + pub fn new(da_config: OpDAConfig) -> Self { + Self { da_config, metrics: OpMinerMetrics::default() } } } @@ -27,6 +29,34 @@ impl MinerApiExtServer for OpMinerExtApi { async fn set_max_da_size(&self, max_tx_size: U64, max_block_size: U64) -> RpcResult { debug!(target: "rpc", "Setting max DA size: tx={}, block={}", max_tx_size, max_block_size); self.da_config.set_max_da_size(max_tx_size.to(), max_block_size.to()); + + self.metrics.set_max_da_tx_size(max_tx_size.to()); + self.metrics.set_max_da_block_size(max_block_size.to()); + Ok(true) } } + +/// Optimism miner metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.miner")] +pub struct OpMinerMetrics { + /// Max DA tx size set on the miner + max_da_tx_size: Gauge, + /// Max DA block size set on the miner + max_da_block_size: Gauge, +} + +impl OpMinerMetrics { + /// Sets the max DA tx size gauge value + #[inline] + pub fn set_max_da_tx_size(&self, size: u64) { + self.max_da_tx_size.set(size as f64); + } + + /// Sets the max DA block size gauge value + #[inline] + pub fn set_max_da_block_size(&self, size: u64) { + self.max_da_block_size.set(size as f64); + } +} diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index c32f482bbe6..bc86e93f91c 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -7,8 +7,8 @@ use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; -use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::OpNextBlockEnvAttributes; +use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{OpPayloadBuilder, OpPayloadPrimitives}; use reth_optimism_txpool::OpPooledTx; use reth_primitives_traits::SealedHeader; @@ -44,10 +44,14 @@ impl OpDebugWitnessApi { impl OpDebugWitnessApi where EvmConfig: ConfigureEvm, - Provider: NodePrimitivesProvider + BlockReaderIdExt
<Header = Header>,
+    Provider: NodePrimitivesProvider<Primitives = EvmConfig::Primitives>
+        + BlockReaderIdExt<Header = <EvmConfig::Primitives as NodePrimitives>::BlockHeader>,
 {
     /// Fetches the parent header by hash.
-    fn parent_header(&self, parent_block_hash: B256) -> ProviderResult<SealedHeader> {
+    fn parent_header(
+        &self,
+        parent_block_hash: B256,
+    ) -> ProviderResult<SealedHeader<<EvmConfig::Primitives as NodePrimitives>::BlockHeader>> {
         self.inner
             .provider
             .sealed_header_by_hash(parent_block_hash)?
@@ -62,10 +66,10 @@ where
     Pool: TransactionPool<
         Transaction: OpPooledTx<Consensus = <EvmConfig::Primitives as NodePrimitives>::SignedTx>,
     > + 'static,
-    Provider: BlockReaderIdExt<Header = Header>
+    Provider: BlockReaderIdExt<Header = <EvmConfig::Primitives as NodePrimitives>
::BlockHeader> + NodePrimitivesProvider + StateProviderFactory - + ChainSpecProvider + + ChainSpecProvider + Clone + 'static, EvmConfig: ConfigureEvm diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 0bb7c3a0bd3..56ced8d74e1 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -12,11 +12,13 @@ workspace = true [dependencies] # reth +reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives-traits.workspace = true -reth-optimism-forks.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde", "reth-codec"] } reth-storage-api = { workspace = true, features = ["db-api"] } +reth-db-api.workspace = true +reth-provider.workspace = true # ethereum alloy-primitives.workspace = true @@ -37,7 +39,6 @@ std = [ "reth-stages-types/std", "alloy-consensus/std", "reth-chainspec/std", - "reth-optimism-forks/std", "reth-optimism-primitives/std", "reth-primitives-traits/std", ] diff --git a/crates/optimism/storage/src/chain.rs b/crates/optimism/storage/src/chain.rs index 5df84eeae46..424a773d49a 100644 --- a/crates/optimism/storage/src/chain.rs +++ b/crates/optimism/storage/src/chain.rs @@ -3,21 +3,60 @@ use alloy_consensus::Header; use alloy_primitives::BlockNumber; use core::marker::PhantomData; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_optimism_forks::OpHardforks; +use reth_db_api::transaction::{DbTx, DbTxMut}; +use reth_node_api::{FullNodePrimitives, FullSignedTx}; use reth_optimism_primitives::OpTransactionSigned; use reth_primitives_traits::{Block, FullBlockHeader, SignedTransaction}; +use reth_provider::{ + providers::{ChainStorage, NodeTypesForProvider}, + DatabaseProvider, +}; use reth_storage_api::{ - errors::ProviderResult, BlockBodyReader, BlockBodyWriter, DBProvider, ReadBodyInput, - StorageLocation, + errors::ProviderResult, BlockBodyReader, BlockBodyWriter, ChainStorageReader, + ChainStorageWriter, DBProvider, ReadBodyInput, StorageLocation, }; /// Optimism storage implementation. 
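// ---- editor's note (sketch, not part of the diff) ----------------------------------------------
// Below, `OptStorage` is renamed to `OpStorage` (the L1 `EthStorage` alias in node.rs is dropped
// in its favor) and gains a `ChainStorage` impl whose `reader`/`writer` simply return the storage
// itself. Standalone sketch of that "one type, both roles" pattern:

trait BodyReaderSketch {
    fn read_body(&self) -> &'static str;
}
trait BodyWriterSketch {
    fn write_body(&self);
}

#[derive(Debug, Default, Clone, Copy)]
struct StorageSketch;

impl BodyReaderSketch for StorageSketch {
    fn read_body(&self) -> &'static str {
        "block body"
    }
}
impl BodyWriterSketch for StorageSketch {
    fn write_body(&self) {}
}

impl StorageSketch {
    // Mirrors `ChainStorage::reader`/`::writer`: the storage hands out itself for both roles.
    fn reader(&self) -> impl BodyReaderSketch + '_ {
        *self
    }
    fn writer(&self) -> impl BodyWriterSketch + '_ {
        *self
    }
}
// -------------------------------------------------------------------------------------------------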
#[derive(Debug, Clone, Copy)] -pub struct OptStorage(PhantomData<(T, H)>); +pub struct OpStorage(PhantomData<(T, H)>); + +impl Default for OpStorage { + fn default() -> Self { + Self(Default::default()) + } +} + +impl ChainStorage for OpStorage +where + T: FullSignedTx, + H: FullBlockHeader, + N: FullNodePrimitives< + Block = alloy_consensus::Block, + BlockHeader = H, + BlockBody = alloy_consensus::BlockBody, + SignedTx = T, + >, +{ + fn reader(&self) -> impl ChainStorageReader, N> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } + + fn writer(&self) -> impl ChainStorageWriter, N> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } +} -impl BlockBodyWriter> - for OptStorage +impl BlockBodyWriter> for OpStorage where + Provider: DBProvider, T: SignedTransaction, H: FullBlockHeader, { @@ -42,9 +81,9 @@ where } } -impl BlockBodyReader for OptStorage +impl BlockBodyReader for OpStorage where - Provider: ChainSpecProvider + DBProvider, + Provider: ChainSpecProvider + DBProvider, T: SignedTransaction, H: FullBlockHeader, { diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 8ef3d88d239..adefb646f6e 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -12,7 +12,7 @@ extern crate alloc; mod chain; -pub use chain::OptStorage; +pub use chain::OpStorage; #[cfg(test)] mod tests { diff --git a/crates/optimism/txpool/src/supervisor/client.rs b/crates/optimism/txpool/src/supervisor/client.rs index 5b6c65eeb28..4cc67685b59 100644 --- a/crates/optimism/txpool/src/supervisor/client.rs +++ b/crates/optimism/txpool/src/supervisor/client.rs @@ -113,6 +113,7 @@ impl SupervisorClient { ) .await { + self.inner.metrics.increment_metrics_for_error(&err); trace!(target: "txpool", hash=%hash, err=%err, "Cross chain transaction invalid"); return Some(Err(InvalidCrossTx::ValidationError(err))); } diff --git a/crates/optimism/txpool/src/supervisor/metrics.rs b/crates/optimism/txpool/src/supervisor/metrics.rs index 1ccb2178916..0c66d0039ac 100644 --- a/crates/optimism/txpool/src/supervisor/metrics.rs +++ b/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,6 +1,11 @@ //! 
Optimism supervisor and sequencer metrics -use reth_metrics::{metrics::Histogram, Metrics}; +use crate::supervisor::InteropTxValidatorError; +use op_alloy_rpc_types::InvalidInboxEntry; +use reth_metrics::{ + metrics::{Counter, Histogram}, + Metrics, +}; use std::time::Duration; /// Optimism supervisor metrics @@ -9,6 +14,29 @@ use std::time::Duration; pub struct SupervisorMetrics { /// How long it takes to query the supervisor in the Optimism transaction pool pub(crate) supervisor_query_latency: Histogram, + + /// Counter for the number of times data was skipped + pub(crate) skipped_data_count: Counter, + /// Counter for the number of times an unknown chain was encountered + pub(crate) unknown_chain_count: Counter, + /// Counter for the number of times conflicting data was encountered + pub(crate) conflicting_data_count: Counter, + /// Counter for the number of times ineffective data was encountered + pub(crate) ineffective_data_count: Counter, + /// Counter for the number of times data was out of order + pub(crate) out_of_order_count: Counter, + /// Counter for the number of times data was awaiting replacement + pub(crate) awaiting_replacement_count: Counter, + /// Counter for the number of times data was out of scope + pub(crate) out_of_scope_count: Counter, + /// Counter for the number of times there was no parent for the first block + pub(crate) no_parent_for_first_block_count: Counter, + /// Counter for the number of times future data was encountered + pub(crate) future_data_count: Counter, + /// Counter for the number of times data was missed + pub(crate) missed_data_count: Counter, + /// Counter for the number of times data corruption was encountered + pub(crate) data_corruption_count: Counter, } impl SupervisorMetrics { @@ -17,6 +45,30 @@ impl SupervisorMetrics { pub fn record_supervisor_query(&self, duration: Duration) { self.supervisor_query_latency.record(duration.as_secs_f64()); } + + /// Increments the metrics for the given error + pub fn increment_metrics_for_error(&self, error: &InteropTxValidatorError) { + if let InteropTxValidatorError::InvalidEntry(inner) = error { + match inner { + InvalidInboxEntry::SkippedData => self.skipped_data_count.increment(1), + InvalidInboxEntry::UnknownChain => self.unknown_chain_count.increment(1), + InvalidInboxEntry::ConflictingData => self.conflicting_data_count.increment(1), + InvalidInboxEntry::IneffectiveData => self.ineffective_data_count.increment(1), + InvalidInboxEntry::OutOfOrder => self.out_of_order_count.increment(1), + InvalidInboxEntry::AwaitingReplacement => { + self.awaiting_replacement_count.increment(1) + } + InvalidInboxEntry::OutOfScope => self.out_of_scope_count.increment(1), + InvalidInboxEntry::NoParentForFirstBlock => { + self.no_parent_for_first_block_count.increment(1) + } + InvalidInboxEntry::FutureData => self.future_data_count.increment(1), + InvalidInboxEntry::MissedData => self.missed_data_count.increment(1), + InvalidInboxEntry::DataCorruption => self.data_corruption_count.increment(1), + InvalidInboxEntry::UninitializedChainDatabase => {} + } + } + } } /// Optimism sequencer metrics diff --git a/crates/optimism/txpool/src/transaction.rs b/crates/optimism/txpool/src/transaction.rs index 053ba64f6fb..6cbc645fe51 100644 --- a/crates/optimism/txpool/src/transaction.rs +++ b/crates/optimism/txpool/src/transaction.rs @@ -69,14 +69,14 @@ impl OpPooledTransaction { } } - /// Returns the estimated compressed size of a transaction in bytes scaled by 1e6. 
+    /// Returns the estimated compressed size of a transaction in bytes.
     /// This value is computed based on the following formula:
-    /// `max(minTransactionSize, intercept + fastlzCoef*fastlzSize)`
+    /// `max(minTransactionSize, intercept + fastlzCoef*fastlzSize) / 1e6`
     /// Uses cached EIP-2718 encoded bytes to avoid recomputing the encoding for each estimation.
     pub fn estimated_compressed_size(&self) -> u64 {
         *self
             .estimated_tx_compressed_size
-            .get_or_init(|| op_alloy_flz::tx_estimated_size_fjord(self.encoded_2718()))
+            .get_or_init(|| op_alloy_flz::tx_estimated_size_fjord_bytes(self.encoded_2718()))
     }

     /// Returns lazily computed EIP-2718 encoded bytes of the transaction.
diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs
index 048a55b8486..470e34bee33 100644
--- a/crates/payload/basic/src/lib.rs
+++ b/crates/payload/basic/src/lib.rs
@@ -726,7 +726,7 @@ impl<Payload> BuildOutcome<Payload> {
     }

     /// Applies a fn on the current payload.
-    pub(crate) fn map_payload<F, P>(self, f: F) -> BuildOutcome<P>
+    pub fn map_payload<F, P>(self, f: F) -> BuildOutcome<P>
where F: FnOnce(Payload) -> P, { diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 69d6c0089d3..688431940e7 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -67,6 +67,13 @@ pub trait BlockBody: self.transactions_iter().find(|tx| tx.tx_hash() == hash) } + /// Returns true if the block body contains a transaction with the given hash. + /// + /// This is a convenience function for `transaction_by_hash().is_some()` + fn contains_transaction(&self, hash: &B256) -> bool { + self.transaction_by_hash(hash).is_some() + } + /// Clones the transactions in the block. /// /// This is a convenience function for `transactions().to_vec()` diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index b5bf127fdf3..7df2c017b30 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -17,6 +17,9 @@ pub const MAXIMUM_GAS_LIMIT_BLOCK: u64 = 2u64.pow(63) - 1; /// The bound divisor of the gas limit, used in update calculations. pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; +/// Maximum transaction gas limit as defined by [EIP-7825](https://eips.ethereum.org/EIPS/eip-7825) activated in `Osaka` hardfork. +pub const MAX_TX_GAS_LIMIT_OSAKA: u64 = 30_000_000; + /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index 4fdfeaf9d86..94c35d0190b 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -278,13 +278,12 @@ mod op { use op_alloy_consensus::{OpPooledTransaction, OpTxEnvelope}; impl TryFrom> for Extended { - type Error = OpTxEnvelope; + type Error = >::Error; fn try_from(value: Extended) -> Result { match value { Extended::BuiltIn(tx) => { - let converted_tx: OpPooledTransaction = - tx.clone().try_into().map_err(|_| tx)?; + let converted_tx: OpPooledTransaction = tx.try_into()?; Ok(Self::BuiltIn(converted_tx)) } Extended::Other(tx) => Ok(Self::Other(tx)), @@ -298,6 +297,15 @@ mod op { } } + impl From> for Extended { + fn from(tx: Extended) -> Self { + match tx { + Extended::BuiltIn(tx) => Self::BuiltIn(tx.into()), + Extended::Other(tx) => Self::Other(tx), + } + } + } + impl TryFrom> for OpPooledTransaction { type Error = ValueError; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index a8d632c3569..3e2e64ad923 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -5,6 +5,7 @@ use alloc::vec::Vec; use alloy_consensus::{ Eip2718EncodableReceipt, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, }; +use alloy_rlp::{Decodable, Encodable}; use core::fmt; /// Helper trait that unifies all behaviour required by receipt to support full node operations. 
@@ -23,6 +24,8 @@ pub trait Receipt: + TxReceipt + RlpEncodableReceipt + RlpDecodableReceipt + + Encodable + + Decodable + Eip2718EncodableReceipt + Typed2718 + MaybeSerde diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs index 8b3ca7a594b..d08a05ecdfd 100644 --- a/crates/primitives-traits/src/serde_bincode_compat.rs +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -242,4 +242,17 @@ mod block_bincode { repr.into() } } + + #[cfg(feature = "scroll-alloy-traits")] + impl SerdeBincodeCompat for scroll_alloy_consensus::ScrollTxEnvelope { + type BincodeRepr<'a> = scroll_alloy_consensus::serde_bincode_compat::ScrollTxEnvelope<'a>; + + fn as_repr(&self) -> Self::BincodeRepr<'_> { + self.into() + } + + fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { + repr.into() + } + } } diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index fb5f6ae1427..21b192b6bc7 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -178,6 +178,7 @@ mod op { #[cfg(feature = "scroll-alloy-traits")] mod scroll { use super::*; + use scroll_alloy_consensus::ScrollTxEnvelope; impl InMemorySize for scroll_alloy_consensus::ScrollTypedTransaction { fn size(&self) -> usize { @@ -201,6 +202,18 @@ mod scroll { } } } + + impl InMemorySize for ScrollTxEnvelope { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::L1Message(tx) => tx.size(), + } + } + } } #[cfg(test)] diff --git a/crates/primitives-traits/src/transaction/error.rs b/crates/primitives-traits/src/transaction/error.rs index d155656c0e6..b87405e4abd 100644 --- a/crates/primitives-traits/src/transaction/error.rs +++ b/crates/primitives-traits/src/transaction/error.rs @@ -61,6 +61,9 @@ pub enum InvalidTransactionError { /// Thrown if the sender of a transaction is a contract. #[error("transaction signer has bytecode set")] SignerAccountHasBytecode, + /// Thrown post Osaka if gas limit is too high. 
+ #[error("gas limit too high")] + GasLimitTooHigh, } /// Represents error variants that can happen when trying to convert a transaction to pooled diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 820fcde5f5a..1bbca03d74c 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -224,27 +224,60 @@ mod op { } #[cfg(feature = "scroll-alloy-traits")] -impl SignedTransaction for scroll_alloy_consensus::ScrollPooledTransaction { - fn tx_hash(&self) -> &TxHash { - match self { - Self::Legacy(tx) => tx.hash(), - Self::Eip2930(tx) => tx.hash(), - Self::Eip1559(tx) => tx.hash(), - Self::Eip7702(tx) => tx.hash(), +mod scroll { + use super::*; + use scroll_alloy_consensus::{ScrollPooledTransaction, ScrollTxEnvelope}; + + impl SignedTransaction for ScrollPooledTransaction { + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + } + } + + fn recover_signer_unchecked_with_buf( + &self, + buf: &mut Vec, + ) -> Result { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(buf), + Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), + Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), + Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), + } + let signature_hash = keccak256(buf); + recover_signer_unchecked(self.signature(), signature_hash) } } - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), + impl SignedTransaction for ScrollTxEnvelope { + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::L1Message(tx) => tx.hash_ref(), + } + } + + fn recover_signer_unchecked_with_buf( + &self, + buf: &mut Vec, + ) -> Result { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(buf), + Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), + Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), + Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), + Self::L1Message(tx) => return Ok(tx.sender), + } + let signature_hash = keccak256(buf); + let signature = self.signature().expect("handled L1 message in previous match"); + recover_signer_unchecked(&signature, signature_hash) } - let signature_hash = keccak256(buf); - recover_signer_unchecked(self.signature(), signature_hash) } } diff --git a/crates/ress/provider/src/lib.rs b/crates/ress/provider/src/lib.rs index 5783e3a9364..41318ebaaf1 100644 --- a/crates/ress/provider/src/lib.rs +++ b/crates/ress/provider/src/lib.rs @@ -11,7 +11,9 @@ use alloy_consensus::BlockHeader as _; use alloy_primitives::{Bytes, B256}; use parking_lot::Mutex; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, MemoryOverlayStateProvider}; +use reth_chain_state::{ + ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, +}; use reth_errors::{ProviderError, ProviderResult}; use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives}; use reth_evm::{execute::Executor, ConfigureEvm}; @@ -128,7 +130,7 @@ where recovered_block: invalid, 
..Default::default() }, - ..Default::default() + trie: ExecutedTrieUpdates::empty(), }); } } @@ -164,7 +166,7 @@ where let witness_state_provider = self.provider.state_by_block_hash(ancestor_hash)?; let mut trie_input = TrieInput::default(); for block in executed_ancestors.into_iter().rev() { - trie_input.append_cached_ref(&block.trie, &block.hashed_state); + trie_input.append_cached_ref(block.trie.as_ref().unwrap(), &block.hashed_state); } let mut hashed_state = db.into_state(); hashed_state.extend(record.hashed_state); diff --git a/crates/ress/provider/src/pending_state.rs b/crates/ress/provider/src/pending_state.rs index 47eb9996f9a..1c4c81e29e7 100644 --- a/crates/ress/provider/src/pending_state.rs +++ b/crates/ress/provider/src/pending_state.rs @@ -123,6 +123,7 @@ pub async fn maintain_pending_state

( } // ignore BeaconConsensusEngineEvent::CanonicalChainCommitted(_, _) | + BeaconConsensusEngineEvent::BlockReceived(_) | BeaconConsensusEngineEvent::LiveSyncProgress(_) => (), } } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 1e5d36ec22a..95ffe22f05a 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -54,5 +54,6 @@ serde = [ "reth-trie?/serde", "reth-ethereum-forks/serde", "reth-primitives-traits/serde", + "reth-storage-api/serde", ] portable = ["revm/portable"] diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 4eb9a6e653c..7d170d342f2 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -16,6 +16,8 @@ workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true +reth-trie-common.workspace = true +reth-chain-state.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 961090c34d0..8aefda4767b 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -7,6 +7,7 @@ use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_trie_common::{updates::TrieUpdates, HashedPostState}; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] @@ -359,6 +360,15 @@ pub trait DebugApi { #[method(name = "startGoTrace")] async fn debug_start_go_trace(&self, file: String) -> RpcResult<()>; + /// Returns the state root of the `HashedPostState` on top of the state for the given block with + /// trie updates. + #[method(name = "stateRootWithUpdates")] + async fn debug_state_root_with_updates( + &self, + hashed_state: HashedPostState, + block_id: Option, + ) -> RpcResult<(B256, TrieUpdates)>; + /// Stops an ongoing CPU profile. #[method(name = "stopCPUProfile")] async fn debug_stop_cpu_profile(&self) -> RpcResult<()>; diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index ecff499abba..c8651e608f5 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,4 +1,4 @@ -use alloy_eips::BlockId; +use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_trace::otterscan::{ @@ -19,7 +19,10 @@ pub trait Otterscan { /// /// Ref: #[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])] - async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; + async fn get_header_by_number( + &self, + block_number: LenientBlockNumberOrTag, + ) -> RpcResult>; /// Check if a certain address contains a deployed code. #[method(name = "hasCode")] @@ -47,7 +50,10 @@ pub trait Otterscan { /// Tailor-made and expanded version of `eth_getBlockByNumber` for block details page in /// Otterscan. #[method(name = "getBlockDetails")] - async fn get_block_details(&self, block_number: u64) -> RpcResult>; + async fn get_block_details( + &self, + block_number: LenientBlockNumberOrTag, + ) -> RpcResult>; /// Tailor-made and expanded version of `eth_getBlockByHash` for block details page in /// Otterscan. 
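The `ots_*` methods above and below migrate their `block_number` parameters from bare `u64` to `LenientBlockNumberOrTag`, so Otterscan frontends can pass either a plain number or a block tag. A small sketch of constructing the parameter, mirroring the updated rpc-builder tests later in this diff:

```rust
use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockNumberOrTag};

fn otterscan_params() -> (LenientBlockNumberOrTag, LenientBlockNumberOrTag) {
    // Same constructor the updated tests use below.
    let by_number = LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(100));
    // The tests also pass `Default::default()`, which wraps the default
    // tag (presumably `latest`).
    let default_tag = LenientBlockNumberOrTag::default();
    (by_number, default_tag)
}
```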
@@ -58,7 +64,7 @@ pub trait Otterscan { #[method(name = "getBlockTransactions")] async fn get_block_transactions( &self, - block_number: u64, + block_number: LenientBlockNumberOrTag, page_number: usize, page_size: usize, ) -> RpcResult>; @@ -68,7 +74,7 @@ pub trait Otterscan { async fn search_transactions_before( &self, address: Address, - block_number: u64, + block_number: LenientBlockNumberOrTag, page_size: usize, ) -> RpcResult; @@ -77,7 +83,7 @@ pub trait Otterscan { async fn search_transactions_after( &self, address: Address, - block_number: u64, + block_number: LenientBlockNumberOrTag, page_size: usize, ) -> RpcResult; diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 0589ffc00ce..cc72705fa54 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,6 +1,7 @@ use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_chain_state::CanonStateNotification; use std::collections::HashMap; /// Reth API namespace for reth-specific methods @@ -13,4 +14,12 @@ pub trait RethApi { &self, block_id: BlockId, ) -> RpcResult>; + + /// Subscribe to json `ChainNotifications` + #[subscription( + name = "subscribeChainNotifications", + unsubscribe = "unsubscribeChainNotifications", + item = CanonStateNotification + )] + async fn reth_subscribe_chain_notifications(&self) -> jsonrpsee::core::SubscriptionResult; } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 80a3cef3486..d0623ea4a94 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -694,7 +694,8 @@ where Receipt = N::Receipt, Transaction = N::SignedTx, > + AccountReader - + ChangeSetReader, + + ChangeSetReader + + CanonStateSubscriptions, Network: NetworkInfo + Peers + Clone + 'static, EthApi: EthApiServer< RpcTransaction, @@ -1942,6 +1943,54 @@ impl TransportRpcModules { self.replace_ipc(other)?; Ok(true) } + + /// Adds or replaces given [`Methods`] in http module. + /// + /// Returns `true` if the methods were replaced or added, `false` otherwise. + pub fn add_or_replace_http( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_http_methods(other.method_names()); + self.merge_http(other) + } + + /// Adds or replaces given [`Methods`] in ws module. + /// + /// Returns `true` if the methods were replaced or added, `false` otherwise. + pub fn add_or_replace_ws( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_ws_methods(other.method_names()); + self.merge_ws(other) + } + + /// Adds or replaces given [`Methods`] in ipc module. + /// + /// Returns `true` if the methods were replaced or added, `false` otherwise. + pub fn add_or_replace_ipc( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_ipc_methods(other.method_names()); + self.merge_ipc(other) + } + + /// Adds or replaces given [`Methods`] in all configured network modules. + pub fn add_or_replace_configured( + &mut self, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + self.add_or_replace_http(other.clone())?; + self.add_or_replace_ws(other.clone())?; + self.add_or_replace_ipc(other)?; + Ok(()) + } } /// Returns the methods installed in the given module that match the given filter. 
diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 507858c7f91..a32b208d939 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,7 +2,7 @@ //! Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; -use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; use alloy_rpc_types_eth::{ transaction::TransactionRequest, Block, FeeHistory, Filter, Header, Index, Log, @@ -434,9 +434,12 @@ where let nonce = 1; let block_hash = B256::default(); - OtterscanClient::::get_header_by_number(client, block_number) - .await - .unwrap(); + OtterscanClient::::get_header_by_number( + client, + LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)), + ) + .await + .unwrap(); OtterscanClient::::has_code(client, address, None).await.unwrap(); OtterscanClient::::has_code(client, address, Some(block_number.into())) @@ -451,7 +454,13 @@ where OtterscanClient::::trace_transaction(client, tx_hash).await.unwrap(); - OtterscanClient::::get_block_details(client, block_number) + OtterscanClient::::get_block_details( + client, + LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)), + ) + .await + .unwrap_err(); + OtterscanClient::::get_block_details(client, Default::default()) .await .unwrap_err(); @@ -461,7 +470,7 @@ where OtterscanClient::::get_block_transactions( client, - block_number, + LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)), page_number, page_size, ) @@ -473,7 +482,7 @@ where OtterscanClient::::search_transactions_before( client, address, - block_number, + LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)), page_size, ) .await @@ -484,7 +493,7 @@ where OtterscanClient::::search_transactions_after( client, address, - block_number, + LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)), page_size, ) .await diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index c1f0519b348..2a3e361729c 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -460,7 +460,12 @@ where /// Handler for: `eth_getUncleCountByBlockHash` async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult> { trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash"); - Ok(EthBlocks::ommers(self, hash.into())?.map(|ommers| U256::from(ommers.len()))) + + if let Some(block) = self.block_by_hash(hash, false).await? { + Ok(Some(U256::from(block.uncles.len()))) + } else { + Ok(None) + } } /// Handler for: `eth_getUncleCountByBlockNumber` @@ -469,7 +474,12 @@ where number: BlockNumberOrTag, ) -> RpcResult> { trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber"); - Ok(EthBlocks::ommers(self, number.into())?.map(|ommers| U256::from(ommers.len()))) + + if let Some(block) = self.block_by_number(number, false).await? 
{ + Ok(Some(U256::from(block.uncles.len()))) + } else { + Ok(None) + } } /// Handler for: `eth_getBlockReceipts` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 98d58372294..24992560126 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -10,12 +10,11 @@ use alloy_primitives::{Sealable, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; use futures::Future; +use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; -use reth_primitives_traits::{RecoveredBlock, SealedBlock}; +use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock}; use reth_rpc_types_compat::block::from_block; -use reth_storage_api::{ - BlockIdReader, BlockReader, BlockReaderIdExt, ProviderHeader, ProviderReceipt, ProviderTx, -}; +use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::sync::Arc; @@ -160,8 +159,15 @@ pub trait EthBlocks: LoadBlock { fn ommers( &self, block_id: BlockId, - ) -> Result>>, Self::Error> { - self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + ) -> impl Future>>, Self::Error>> + Send + { + async move { + if let Some(block) = self.recovered_block(block_id).await? { + Ok(block.body().ommers().map(|o| o.to_vec())) + } else { + Ok(None) + } + } } /// Returns uncle block at given index in given block. @@ -181,7 +187,9 @@ pub trait EthBlocks: LoadBlock { .map_err(Self::Error::from_eth_err)? .and_then(|block| block.body().ommers().map(|o| o.to_vec())) } else { - self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)? + self.recovered_block(block_id) + .await? + .map(|block| block.body().ommers().map(|o| o.to_vec()).unwrap_or_default()) } .unwrap_or_default(); @@ -207,6 +215,8 @@ pub trait LoadBlock: + SpawnBlocking + RpcNodeCoreExt< Pool: TransactionPool>>, + Primitives: NodePrimitives>, + Evm: ConfigureEvm::Primitives>, > { /// Returns the block object for the given block id. @@ -223,10 +233,8 @@ pub trait LoadBlock: async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - if let Some(pending_block) = self - .provider() - .pending_block_with_senders() - .map_err(Self::Error::from_eth_err)? + if let Some(pending_block) = + self.provider().pending_block().map_err(Self::Error::from_eth_err)? 
{ return Ok(Some(Arc::new(pending_block))); } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 4723516fc23..dda235ffaf3 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -134,7 +134,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA apply_state_overrides(state_overrides, &mut db)?; } - let block_env = evm_env.block_env.clone(); + let block_gas_limit = evm_env.block_env.gas_limit; let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { @@ -142,7 +142,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let txs_without_gas_limit = calls.iter().filter(|tx| tx.gas.is_none()).count(); - if total_specified_gas > block_env.gas_limit { + if total_specified_gas > block_gas_limit { return Err(EthApiError::Other(Box::new( EthSimulateError::BlockGasLimitExceeded, )) @@ -150,8 +150,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } if txs_without_gas_limit > 0 { - (block_env.gas_limit - total_specified_gas) / - txs_without_gas_limit as u64 + (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u64 } else { 0 } @@ -216,7 +215,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA overrides: EvmOverrides, ) -> impl Future> + Send { async move { - let (res, _env) = + let res = self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; ensure_success(res.result) @@ -289,7 +288,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let block_transactions = block.transactions_recovered().take(num_txs); for tx in block_transactions { let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx); - let (res, _) = this.transact(&mut db, evm_env.clone(), tx_env)?; + let res = this.transact(&mut db, evm_env.clone(), tx_env)?; db.commit(res.state); } } @@ -314,7 +313,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let (current_evm_env, prepared_tx) = this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; - let (res, _) = this.transact(&mut db, current_evm_env, prepared_tx)?; + let res = this.transact(&mut db, current_evm_env, prepared_tx)?; match ensure_success::<_, Self::Error>(res.result) { Ok(output) => { @@ -427,11 +426,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA }; // transact again to get the exact gas used - let (result, (_, tx_env)) = self.transact(&mut db, evm_env, tx_env)?; + let gas_limit = tx_env.gas_limit(); + let result = self.transact(&mut db, evm_env, tx_env)?; let res = match result.result { ExecutionResult::Halt { reason, gas_used } => { - let error = - Some(Self::Error::from_evm_halt(reason, tx_env.gas_limit()).to_string()); + let error = Some(Self::Error::from_evm_halt(reason, gas_limit).to_string()); AccessListResult { access_list, gas_used: U256::from(gas_used), error } } ExecutionResult::Revert { output, gas_used } => { @@ -478,61 +477,47 @@ pub trait Call: /// Executes the `TxEnv` against the given [Database] without committing state /// changes. 
- #[expect(clippy::type_complexity)] fn transact( &self, db: DB, evm_env: EvmEnvFor, tx_env: TxEnvFor, - ) -> Result< - (ResultAndState>, (EvmEnvFor, TxEnvFor)), - Self::Error, - > + ) -> Result>, Self::Error> where DB: Database, { - let mut evm = self.evm_config().evm_with_env(db, evm_env.clone()); - let res = evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err)?; + let mut evm = self.evm_config().evm_with_env(db, evm_env); + let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; - Ok((res, (evm_env, tx_env))) + Ok(res) } /// Executes the [`EvmEnv`] against the given [Database] without committing state /// changes. - #[expect(clippy::type_complexity)] fn transact_with_inspector( &self, db: DB, evm_env: EvmEnvFor, tx_env: TxEnvFor, inspector: I, - ) -> Result< - (ResultAndState>, (EvmEnvFor, TxEnvFor)), - Self::Error, - > + ) -> Result>, Self::Error> where DB: Database, I: InspectorFor, { - let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env.clone(), inspector); - let res = evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err)?; + let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector); + let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; - Ok((res, (evm_env, tx_env))) + Ok(res) } /// Executes the call request at the given [`BlockId`]. - #[expect(clippy::type_complexity)] fn transact_call_at( &self, request: TransactionRequest, at: BlockId, overrides: EvmOverrides, - ) -> impl Future< - Output = Result< - (ResultAndState>, (EvmEnvFor, TxEnvFor)), - Self::Error, - >, - > + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadPendingBlock, { @@ -656,7 +641,7 @@ pub trait Call: let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx); - let (res, _) = this.transact(&mut db, evm_env, tx_env)?; + let res = this.transact(&mut db, evm_env, tx_env)?; f(tx_info, res, db) }) .await diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index ecef2270d42..297559fbabf 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -94,7 +94,7 @@ pub trait EstimateCall: Call { // with the minimum gas limit to make sure. let mut tx_env = tx_env.clone(); tx_env.set_gas_limit(MIN_TRANSACTION_GAS); - if let Ok((res, _)) = self.transact(&mut db, evm_env.clone(), tx_env) { + if let Ok(res) = self.transact(&mut db, evm_env.clone(), tx_env) { if res.result.is_success() { return Ok(U256::from(MIN_TRANSACTION_GAS)) } @@ -119,36 +119,30 @@ pub trait EstimateCall: Call { trace!(target: "rpc::eth::estimate", ?evm_env, ?tx_env, "Starting gas estimation"); // Execute the transaction with the highest possible gas limit. - let (mut res, (mut evm_env, mut tx_env)) = - match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much - // gas. If the gas price or gas limit was specified in the request, - // retry the transaction with the block's gas limit to determine if - // the failure was due to insufficient gas. 
- Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => - { - return Err(self.map_out_of_gas_err( - block_env_gas_limit, - evm_env, - tx_env, - &mut db, - )) - } - Err(err) if err.is_gas_too_low() => { - // This failed because the configured gas cost of the tx was lower than what - // actually consumed by the tx This can happen if the - // request provided fee values manually and the resulting gas cost exceeds the - // sender's allowance, so we return the appropriate error here - return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance { - gas_limit: tx_env.gas_limit(), - } - .into_eth_err()) + let mut res = match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much + // gas. If the gas price or gas limit was specified in the request, + // retry the transaction with the block's gas limit to determine if + // the failure was due to insufficient gas. + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + { + return Err(self.map_out_of_gas_err(block_env_gas_limit, evm_env, tx_env, &mut db)) + } + Err(err) if err.is_gas_too_low() => { + // This failed because the configured gas cost of the tx was lower than what + // actually consumed by the tx This can happen if the + // request provided fee values manually and the resulting gas cost exceeds the + // sender's allowance, so we return the appropriate error here + return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance { + gas_limit: tx_env.gas_limit(), } - // Propagate other results (successful or other errors). - ethres => ethres?, - }; + .into_eth_err()) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; let gas_refund = match res.result { ExecutionResult::Success { gas_refunded, .. } => gas_refunded, @@ -194,7 +188,7 @@ pub trait EstimateCall: Call { tx_env.set_gas_limit(optimistic_gas_limit); // Re-execute the transaction with the new gas limit and update the result and // environment. - (res, (evm_env, tx_env)) = self.transact(&mut db, evm_env, tx_env)?; + res = self.transact(&mut db, evm_env.clone(), tx_env.clone())?; // Update the gas used based on the new result. gas_used = res.result.gas_used(); // Update the gas limit estimates (highest and lowest) based on the execution result. @@ -241,7 +235,7 @@ pub trait EstimateCall: Call { // Handle other cases, including successful transactions. ethres => { // Unpack the result and environment if the transaction was successful. - (res, (evm_env, tx_env)) = ethres?; + res = ethres?; // Update the estimated gas range based on the transaction result. 
update_estimated_gas_range( res.result, @@ -296,7 +290,7 @@ pub trait EstimateCall: Call { { let req_gas_limit = tx_env.gas_limit(); tx_env.set_gas_limit(env_gas_limit); - let (res, _) = match self.transact(db, evm_env, tx_env) { + let res = match self.transact(db, evm_env, tx_env) { Ok(res) => res, Err(err) => return err, }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 8aca5e99235..da354181aff 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -13,7 +13,7 @@ use reth_rpc_eth_types::{ fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; -use reth_storage_api::{BlockIdReader, HeaderProvider}; +use reth_storage_api::{BlockIdReader, BlockReaderIdExt, HeaderProvider}; use tracing::debug; /// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the @@ -256,7 +256,10 @@ pub trait EthFees: LoadFee { /// Loads fee from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. -pub trait LoadFee: LoadBlock { +pub trait LoadFee: LoadBlock +where + Self::Provider: BlockReaderIdExt, +{ /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. @@ -335,10 +338,9 @@ pub trait LoadFee: LoadBlock { /// /// See also: fn gas_price(&self) -> impl Future> + Send { - let header = self.recovered_block(BlockNumberOrTag::Latest.into()); - let suggested_tip = self.suggested_priority_fee(); async move { - let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; + let header = self.provider().latest_header().map_err(Self::Error::from_eth_err)?; + let suggested_tip = self.suggested_priority_fee().await?; let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); Ok(suggested_tip + U256::from(base_fee)) } @@ -347,8 +349,9 @@ pub trait LoadFee: LoadBlock { /// Returns a suggestion for a base fee for blob transactions. fn blob_base_fee(&self) -> impl Future> + Send { async move { - self.recovered_block(BlockNumberOrTag::Latest.into()) - .await? + self.provider() + .latest_header() + .map_err(Self::Error::from_eth_err)? .and_then(|h| { h.maybe_next_block_blob_fee( self.provider().chain_spec().blob_params_at_timestamp(h.timestamp()), diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 9fc6c8f6a97..62202c5b664 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -45,13 +45,12 @@ pub trait LoadPendingBlock: Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, + Evm: ConfigureEvm::Primitives>, + Primitives: NodePrimitives< + BlockHeader = ProviderHeader, + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + Block = ProviderBlock, >, > { @@ -77,9 +76,7 @@ pub trait LoadPendingBlock: >, Self::Error, > { - if let Some(block) = - self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? - { + if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? 
{ if let Some(receipts) = self .provider() .receipts_by_block(block.hash().into()) diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 2d999cf7ecb..3916b5eb696 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -30,7 +30,10 @@ pub use pubsub::EthPubSubApiServer; pub use reth_rpc_eth_types::error::{ AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, }; -pub use reth_rpc_types_compat::TransactionCompat; +pub use reth_rpc_types_compat::{ + try_into_op_tx_info, try_into_scroll_tx_info, IntoRpcTx, RpcConverter, TransactionCompat, + TransactionConversionError, TryIntoSimTx, TxInfoMapper, +}; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index e69b3ac79d6..bdc8d615737 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -83,7 +83,7 @@ where >, > + EthApiTypes< TransactionCompat: TransactionCompat< - ::Transaction, + Primitives = ::Primitives, Transaction = RpcTransaction, Error = RpcError, >, @@ -99,7 +99,7 @@ impl FullEthApiTypes for T where >, > + EthApiTypes< TransactionCompat: TransactionCompat< - ::Transaction, + Primitives = ::Primitives, Transaction = RpcTransaction, Error = RpcError, >, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index fa5594b18d9..a055acac58a 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -4,7 +4,7 @@ use super::{EthStateCacheConfig, MultiConsumerLruCache}; use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; -use futures::{future::Either, Stream, StreamExt}; +use futures::{future::Either, stream::FuturesOrdered, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::Chain; @@ -41,6 +41,9 @@ type ReceiptsResponseSender = oneshot::Sender = oneshot::Sender>>>; +type CachedBlockAndReceiptsResponseSender = + oneshot::Sender<(Option>>, Option>>)>; + /// The type that can send the response to a requested header type HeaderResponseSender = oneshot::Sender>; @@ -197,6 +200,30 @@ impl EthStateCache { Ok(receipts?.map(|r| (r, block))) } + /// Retrieves both block and receipts from cache if available. + pub async fn maybe_cached_block_and_receipts( + &self, + block_hash: B256, + ) -> ProviderResult<(Option>>, Option>>)> { + let (response_tx, rx) = oneshot::channel(); + let _ = self + .to_service + .send(CacheAction::GetCachedBlockAndReceipts { block_hash, response_tx }); + rx.await.map_err(|_| CacheServiceUnavailable.into()) + } + + /// Streams cached receipts and blocks for a list of block hashes, preserving input order. + #[allow(clippy::type_complexity)] + pub fn get_receipts_and_maybe_block_stream<'a>( + &'a self, + hashes: Vec, + ) -> impl Stream>, Option>>)>>> + 'a + { + let futures = hashes.into_iter().map(move |hash| self.get_receipts_and_maybe_block(hash)); + + futures.collect::>() + } + /// Requests the header for the given hash. /// /// Returns an error if the header is not found. 
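The cache additions above expose a cache-only lookup plus an ordered bulk stream. A hedged sketch of the single-hash probe (generic parameters and bounds are approximations of what cache/mod.rs actually requires):

```rust
use alloy_primitives::B256;
use reth_rpc_eth_types::EthStateCache;

// Hedged sketch; `B`/`R` stand in for the cache's block and receipt types.
async fn probe<B, R>(cache: &EthStateCache<B, R>, hash: B256)
where
    B: reth_primitives_traits::Block,
    R: Send + Sync + Clone + 'static,
{
    if let Ok((block, receipts)) = cache.maybe_cached_block_and_receipts(hash).await {
        // `None` means the entry was cold in the LRU cache, not that the
        // block is missing on disk; no provider call happens here.
        let _ = (block, receipts);
    }
}
```

The bulk variant, `get_receipts_and_maybe_block_stream`, preserves input order by collecting the per-hash futures into a `FuturesOrdered`.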
@@ -424,6 +451,11 @@ where let _ = response_tx.send(this.full_block_cache.get(&block_hash).cloned()); } + CacheAction::GetCachedBlockAndReceipts { block_hash, response_tx } => { + let block = this.full_block_cache.get(&block_hash).cloned(); + let receipts = this.receipts_cache.get(&block_hash).cloned(); + let _ = response_tx.send((block, receipts)); + } CacheAction::GetBlockWithSenders { block_hash, response_tx } => { if let Some(block) = this.full_block_cache.get(&block_hash).cloned() { let _ = response_tx.send(Ok(Some(block))); @@ -612,6 +644,10 @@ enum CacheAction { block_hash: B256, response_tx: CachedBlockResponseSender, }, + GetCachedBlockAndReceipts { + block_hash: B256, + response_tx: CachedBlockAndReceiptsResponseSender, + }, BlockWithSendersResult { block_hash: B256, res: ProviderResult>>>, diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 6f586bc1ce8..eae015060a3 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -13,6 +13,7 @@ use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; +use reth_rpc_types_compat::TransactionConversionError; use reth_transaction_pool::error::{ Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, @@ -230,6 +231,12 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { } } +impl From for EthApiError { + fn from(_: TransactionConversionError) -> Self { + Self::TransactionConversionError + } +} + #[cfg(feature = "js-tracer")] impl From for EthApiError { fn from(error: revm_inspectors::tracing::js::JsInspectorError) -> Self { @@ -381,6 +388,9 @@ pub enum RpcInvalidTransactionError { /// Thrown if the transaction gas exceeds the limit #[error("intrinsic gas too high")] GasTooHigh, + /// Thrown if the transaction gas limit exceeds the maximum + #[error("gas limit too high")] + GasLimitTooHigh, /// Thrown if a transaction is not supported in the current network configuration. 
#[error("transaction type not supported")] TxTypeNotSupported, @@ -617,6 +627,7 @@ impl From for RpcInvalidTransactionError { InvalidTransactionError::TipAboveFeeCap => Self::TipAboveFeeCap, InvalidTransactionError::FeeCapTooLow => Self::FeeCapTooLow, InvalidTransactionError::SignerAccountHasBytecode => Self::SenderNoEOA, + InvalidTransactionError::GasLimitTooHigh => Self::GasLimitTooHigh, } } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index fd5313c8c0c..616b53b86fb 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -75,8 +75,8 @@ impl FeeHistoryCache { async fn insert_blocks<'a, I, B, R, C>(&self, blocks: I, chain_spec: &C) where B: Block + 'a, - R: TxReceipt, - I: IntoIterator, Arc>)>, + R: TxReceipt + 'a, + I: IntoIterator, &'a [R])>, C: EthChainSpec, { let mut entries = self.inner.entries.write().await; @@ -93,7 +93,7 @@ impl FeeHistoryCache { fee_history_entry.gas_used, fee_history_entry.base_fee_per_gas, block.body().transactions(), - &receipts, + receipts, ) .unwrap_or_default(); entries.insert(block.number(), fee_history_entry); @@ -242,7 +242,7 @@ pub async fn fee_history_cache_new_blocks_task( res = &mut fetch_missing_block => { if let Ok(res) = res { let res = res.as_ref() - .map(|(b, r)| (b.sealed_block(), r.clone())); + .map(|(b, r)| (b.sealed_block(), r.as_slice())); fee_history_cache.insert_blocks(res, &chain_spec).await; } } @@ -253,13 +253,12 @@ pub async fn fee_history_cache_new_blocks_task( }; let committed = event.committed(); - let (blocks, receipts): (Vec<_>, Vec<_>) = committed + let blocks_and_receipts = committed .blocks_and_receipts() .map(|(block, receipts)| { - (block.clone_sealed_block(), Arc::new(receipts.clone())) - }) - .unzip(); - fee_history_cache.insert_blocks(blocks.iter().zip(receipts), &chain_spec).await; + (block.sealed_block(), receipts.as_slice()) + }); + fee_history_cache.insert_blocks(blocks_and_receipts, &chain_spec).await; // keep track of missing blocks missing_blocks = fee_history_cache.missing_consecutive_blocks().await; diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 48c4d1e11a6..27b23b54e40 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -2,15 +2,12 @@ //! previous blocks. 
use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; -use alloy_consensus::{ - constants::GWEI_TO_WEI, transaction::SignerRecoverable, BlockHeader, Transaction, -}; +use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives_traits::BlockBody; use reth_rpc_server_types::{ constants, constants::gas_oracle::{ @@ -234,7 +231,7 @@ where let parent_hash = block.parent_hash(); // sort the functions by ascending effective tip first - let sorted_transactions = block.body().transactions_iter().sorted_by_cached_key(|tx| { + let sorted_transactions = block.transactions_recovered().sorted_by_cached_key(|tx| { if let Some(base_fee) = base_fee_per_gas { (*tx).effective_tip_per_gas(base_fee) } else { @@ -259,10 +256,8 @@ where } // check if the sender was the coinbase, if so, ignore - if let Ok(sender) = tx.recover_signer() { - if sender == block.beneficiary() { - continue - } + if tx.signer() == block.beneficiary() { + continue } // a `None` effective_gas_tip represents a transaction where the max_fee_per_gas is @@ -277,8 +272,44 @@ where Ok(Some((parent_hash, prices))) } -} + /// Get the median tip value for the given block. This is useful for determining + /// tips when a block is at capacity. + /// + /// If the block cannot be found or has no transactions, this will return `None`. + pub async fn get_block_median_tip(&self, block_hash: B256) -> EthResult> { + // check the cache (this will hit the disk if the block is not cached) + let Some(block) = self.cache.get_recovered_block(block_hash).await? else { + return Ok(None) + }; + + let base_fee_per_gas = block.base_fee_per_gas(); + + // Filter, sort and collect the prices + let prices = block + .transactions_recovered() + .filter_map(|tx| { + if let Some(base_fee) = base_fee_per_gas { + (*tx).effective_tip_per_gas(base_fee) + } else { + Some((*tx).priority_fee_or_price()) + } + }) + .sorted() + .collect::>(); + + let median = if prices.is_empty() { + // if there are no prices, return `None` + None + } else if prices.len() % 2 == 1 { + Some(U256::from(prices[prices.len() / 2])) + } else { + Some(U256::from((prices[prices.len() / 2 - 1] + prices[prices.len() / 2]) / 2)) + }; + + Ok(median) + } +} /// Container type for mutable inner state of the [`GasPriceOracle`] #[derive(Debug)] struct GasPriceOracleInner { diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index fb03f24e38c..dee33a7a175 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -16,6 +16,7 @@ use std::sync::Arc; pub fn matching_block_logs_with_tx_hashes<'a, I, R>( filter: &Filter, block_num_hash: BlockNumHash, + block_timestamp: u64, tx_hashes_and_receipts: I, removed: bool, ) -> Vec @@ -44,7 +45,7 @@ where transaction_index: Some(receipt_idx as u64), log_index: Some(log_index), removed, - block_timestamp: None, + block_timestamp: Some(block_timestamp), }; all_logs.push(log); } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a82bb934f21..1b4ed709eff 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -20,7 +20,7 @@ use reth_evm::{ Evm, }; use reth_primitives_traits::{ - block::BlockTx, BlockBody as _, Recovered, RecoveredBlock, SignedTransaction, 
TxTy, + block::BlockTx, BlockBody as _, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, }; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; @@ -77,7 +77,7 @@ pub fn execute_transactions( > where S: BlockBuilder>>>>, - T: TransactionCompat>, + T: TransactionCompat, { builder.apply_pre_execution_changes()?; @@ -111,7 +111,7 @@ where /// them into primitive transactions. /// /// This will set the defaults as defined in -pub fn resolve_transaction>( +pub fn resolve_transaction( mut tx: TransactionRequest, default_gas_limit: u64, block_base_fee_per_gas: u64, @@ -121,6 +121,7 @@ pub fn resolve_transaction>( ) -> Result, EthApiError> where DB::Error: Into, + T: TransactionCompat>, { // If we're missing any fields we try to fill nonce, gas and // gas price. @@ -192,23 +193,26 @@ pub fn build_simulated_block( tx_resp_builder: &T, ) -> Result>>, T::Error> where - T: TransactionCompat, Error: FromEthApiError + FromEvmHalt>, + T: TransactionCompat< + Primitives: NodePrimitives>, + Error: FromEthApiError + FromEvmHalt, + >, B: reth_primitives_traits::Block, { let mut calls: Vec = Vec::with_capacity(results.len()); let mut log_index = 0; - for (index, (result, tx)) in results.iter().zip(block.body().transactions()).enumerate() { + for (index, (result, tx)) in results.into_iter().zip(block.body().transactions()).enumerate() { let call = match result { ExecutionResult::Halt { reason, gas_used } => { - let error = T::Error::from_evm_halt(reason.clone(), tx.gas_limit()); + let error = T::Error::from_evm_halt(reason, tx.gas_limit()); SimCallResult { return_data: Bytes::new(), error: Some(SimulateError { message: error.to_string(), code: error.into().code(), }), - gas_used: *gas_used, + gas_used, logs: Vec::new(), status: false, } @@ -216,26 +220,26 @@ where ExecutionResult::Revert { output, gas_used } => { let error = RevertError::new(output.clone()); SimCallResult { - return_data: output.clone(), + return_data: output, error: Some(SimulateError { code: error.error_code(), message: error.to_string(), }), - gas_used: *gas_used, + gas_used, status: false, logs: Vec::new(), } } ExecutionResult::Success { output, gas_used, logs, .. } => SimCallResult { - return_data: output.clone().into_data(), + return_data: output.into_data(), error: None, - gas_used: *gas_used, + gas_used, logs: logs - .iter() + .into_iter() .map(|log| { log_index += 1; alloy_rpc_types_eth::Log { - inner: log.clone(), + inner: log, log_index: Some(log_index - 1), transaction_index: Some(index as u64), transaction_hash: Some(*tx.tx_hash()), diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 34d80d91145..de11acc8dc8 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -5,7 +5,7 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; use reth_ethereum_primitives::TransactionSigned; -use reth_primitives_traits::{Recovered, SignedTransaction}; +use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; use reth_rpc_types_compat::TransactionCompat; /// Represents from where a transaction was fetched. @@ -39,10 +39,13 @@ impl TransactionSource { } /// Conversion into network specific transaction type. 
- pub fn into_transaction>( + pub fn into_transaction( self, resp_builder: &Builder, - ) -> Result { + ) -> Result + where + Builder: TransactionCompat>, + { match self { Self::Pool(tx) => resp_builder.fill_pending(tx), Self::Block { transaction, index, block_hash, block_number, base_fee } => { diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 76cb92505e0..0fa228c7d04 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,12 +14,27 @@ workspace = true [dependencies] # reth reth-primitives-traits.workspace = true +reth-storage-api = { workspace = true, features = ["serde", "serde-bincode-compat"] } # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } alloy-consensus.workspace = true +alloy-network.workspace = true + +# scroll +reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat"] } +scroll-alloy-consensus.workspace = true +scroll-alloy-rpc-types.workspace = true + +# optimism +op-alloy-consensus.workspace = true +op-alloy-rpc-types.workspace = true +reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat"] } # io serde.workspace = true jsonrpsee-types.workspace = true + +# error +thiserror.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 4f73a2c3f2f..92f90f3c150 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -7,7 +7,8 @@ use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; use reth_primitives_traits::{ - Block as BlockTrait, BlockBody as BlockBodyTrait, RecoveredBlock, SignedTransaction, + Block as BlockTrait, BlockBody as BlockBodyTrait, NodePrimitives, RecoveredBlock, + SignedTransaction, }; /// Converts the given primitive block into a [`Block`] response with the given @@ -21,8 +22,8 @@ pub fn from_block( tx_resp_builder: &T, ) -> Result>, T::Error> where - T: TransactionCompat<<::Body as BlockBodyTrait>::Transaction>, - B: BlockTrait, + T: TransactionCompat, + B: BlockTrait::SignedTx>>, { match kind { BlockTransactionsKind::Hashes => Ok(from_block_with_tx_hashes::(block)), @@ -62,8 +63,8 @@ pub fn from_block_full( tx_resp_builder: &T, ) -> Result>, T::Error> where - T: TransactionCompat<<::Body as BlockBodyTrait>::Transaction>, - B: BlockTrait, + T: TransactionCompat, + B: BlockTrait::SignedTx>>, { let block_number = block.header().number(); let base_fee = block.header().base_fee_per_gas(); diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 40e2a20c4a9..8b0e5a4d0eb 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,4 +12,7 @@ pub mod block; pub mod transaction; -pub use transaction::TransactionCompat; +pub use transaction::{ + try_into_op_tx_info, try_into_scroll_tx_info, IntoRpcTx, RpcConverter, TransactionCompat, + TransactionConversionError, TryIntoSimTx, TxInfoMapper, +}; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index b722c9aa48e..40bac0a985d 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -1,21 +1,34 @@ //! Compatibility functions for rpc `Transaction` type. 
-use alloy_consensus::transaction::Recovered; -use alloy_rpc_types_eth::{request::TransactionRequest, TransactionInfo}; +use alloy_consensus::{ + error::ValueError, transaction::Recovered, EthereumTxEnvelope, SignableTransaction, TxEip4844, +}; +use alloy_network::Network; +use alloy_primitives::{Address, Signature}; +use alloy_rpc_types_eth::{request::TransactionRequest, Transaction, TransactionInfo}; use core::error; +use op_alloy_consensus::{ + transaction::{OpDepositInfo, OpTransactionInfo}, + OpTxEnvelope, +}; +use op_alloy_rpc_types::OpTransactionRequest; +use reth_optimism_primitives::DepositReceipt; +use reth_primitives_traits::{NodePrimitives, SignedTransaction, TxTy}; +use reth_scroll_primitives::ScrollReceipt; +use reth_storage_api::{errors::ProviderError, ReceiptProvider}; +use scroll_alloy_consensus::{ScrollAdditionalInfo, ScrollTransactionInfo, ScrollTxEnvelope}; +use scroll_alloy_rpc_types::ScrollTransactionRequest; use serde::{Deserialize, Serialize}; -use std::fmt; +use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; +use thiserror::Error; /// Builds RPC transaction w.r.t. network. -pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { +pub trait TransactionCompat: Send + Sync + Unpin + Clone + Debug { + /// The lower layer consensus types to convert from. + type Primitives: NodePrimitives; + /// RPC transaction response type. - type Transaction: Serialize - + for<'de> Deserialize<'de> - + Send - + Sync - + Unpin - + Clone - + fmt::Debug; + type Transaction: Serialize + for<'de> Deserialize<'de> + Send + Sync + Unpin + Clone + Debug; /// RPC transaction error type. type Error: error::Error + Into>; @@ -23,7 +36,10 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// Wrapper for `fill()` with default `TransactionInfo` /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. - fn fill_pending(&self, tx: Recovered) -> Result { + fn fill_pending( + &self, + tx: Recovered>, + ) -> Result { self.fill(tx, TransactionInfo::default()) } @@ -34,16 +50,269 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// transaction was mined. fn fill( &self, - tx: Recovered, + tx: Recovered>, tx_inf: TransactionInfo, ) -> Result; /// Builds a fake transaction from a transaction request for inclusion into block built in /// `eth_simulateV1`. - fn build_simulate_v1_transaction(&self, request: TransactionRequest) -> Result; + fn build_simulate_v1_transaction( + &self, + request: TransactionRequest, + ) -> Result, Self::Error>; +} + +/// Converts `self` into `T`. +/// +/// Should create an RPC transaction response object based on a consensus transaction, its signer +/// [`Address`] and an additional context. +pub trait IntoRpcTx { + /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some + /// implementation specific extra information. + type TxInfo; + + /// Performs the conversion. + fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> T; +} + +/// Converts `self` into `T`. +/// +/// Should create a fake transaction for simulation using [`TransactionRequest`]. +pub trait TryIntoSimTx +where + Self: Sized, +{ + /// Performs the conversion. + /// + /// Should return a signed typed transaction envelope for the [`eth_simulateV1`] endpoint with a + /// dummy signature or an error if [required fields] are missing. 
+ /// + /// [`eth_simulateV1`]: + /// [required fields]: TransactionRequest::buildable_type + fn try_into_sim_tx(self) -> Result>; +} + +impl IntoRpcTx for EthereumTxEnvelope { + type TxInfo = TransactionInfo; + + fn into_rpc_tx(self, signer: Address, tx_info: TransactionInfo) -> Transaction { + Transaction::from_transaction(self.with_signer(signer).convert(), tx_info) + } +} + +/// Adds extra context to [`TransactionInfo`]. +pub trait TxInfoMapper { + /// An associated output type that carries [`TransactionInfo`] with some extra context. + type Out; + /// An associated error that can occur during the mapping. + type Err; + + /// Performs the conversion. + fn try_map(&self, tx: T, tx_info: TransactionInfo) -> Result; +} + +impl TxInfoMapper<&T> for () { + type Out = TransactionInfo; + type Err = Infallible; + + fn try_map(&self, _tx: &T, tx_info: TransactionInfo) -> Result { + Ok(tx_info) + } +} + +/// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is a +/// deposit. +pub fn try_into_op_tx_info>( + provider: &T, + tx: &OpTxEnvelope, + tx_info: TransactionInfo, +) -> Result { + let deposit_meta = if tx.is_deposit() { + provider.receipt_by_hash(tx.tx_hash())?.and_then(|receipt| { + receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { + deposit_receipt_version: receipt.deposit_receipt_version, + deposit_nonce: receipt.deposit_nonce, + }) + }) + } else { + None + } + .unwrap_or_default(); + + Ok(OpTransactionInfo::new(tx_info, deposit_meta)) +} + +/// Creates [`ScrollTransactionInfo`] by adding [`ScrollAdditionalInfo`] to [`TransactionInfo`] if +/// `tx` is not a L1 message. +pub fn try_into_scroll_tx_info>( + provider: &T, + tx: &ScrollTxEnvelope, + tx_info: TransactionInfo, +) -> Result { + let additional_info = if tx.is_l1_message() { + None + } else { + provider + .receipt_by_hash(*tx.tx_hash())? + .map(|receipt| ScrollAdditionalInfo { l1_fee: receipt.l1_fee() }) + } + .unwrap_or_default(); + + Ok(ScrollTransactionInfo::new(tx_info, additional_info)) +} + +impl IntoRpcTx for OpTxEnvelope { + type TxInfo = OpTransactionInfo; + + fn into_rpc_tx( + self, + signer: Address, + tx_info: OpTransactionInfo, + ) -> op_alloy_rpc_types::Transaction { + op_alloy_rpc_types::Transaction::from_transaction(self.with_signer(signer), tx_info) + } +} + +impl IntoRpcTx for ScrollTxEnvelope { + type TxInfo = ScrollTransactionInfo; + + fn into_rpc_tx( + self, + signer: Address, + tx_info: Self::TxInfo, + ) -> scroll_alloy_rpc_types::Transaction { + scroll_alloy_rpc_types::Transaction::from_transaction(self.with_signer(signer), tx_info) + } +} + +impl TryIntoSimTx> for TransactionRequest { + fn try_into_sim_tx(self) -> Result, ValueError> { + Self::build_typed_simulate_transaction(self) + } +} - /// Truncates the input of a transaction to only the first 4 bytes. - // todo: remove in favour of using constructor on `TransactionResponse` or similar - // . - fn otterscan_api_truncate_input(tx: &mut Self::Transaction); +impl TryIntoSimTx for TransactionRequest { + fn try_into_sim_tx(self) -> Result> { + let request: OpTransactionRequest = self.into(); + let tx = request.build_typed_tx().map_err(|request| { + ValueError::new(request.as_ref().clone(), "Required fields missing") + })?; + + // Create an empty signature for the transaction. 
+ let signature = Signature::new(Default::default(), Default::default(), false); + + Ok(tx.into_signed(signature).into()) + } +} + +impl TryIntoSimTx for TransactionRequest { + fn try_into_sim_tx(self) -> Result> { + let request: ScrollTransactionRequest = self.into(); + let tx = request.build_typed_tx().map_err(|request| { + ValueError::new(request.as_ref().clone(), "Required fields missing") + })?; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + + Ok(tx.into_signed(signature).into()) + } +} + +/// Conversion into transaction RPC response failed. +#[derive(Debug, Clone, Error)] +#[error("Failed to convert transaction into RPC response: {0}")] +pub struct TransactionConversionError(String); + +/// Generic RPC response object converter for primitives `N` and network `E`. +#[derive(Debug)] +pub struct RpcConverter { + phantom: PhantomData<(N, E, Err)>, + mapper: Map, +} + +impl RpcConverter { + /// Creates a new [`RpcConverter`] with the default mapper. + pub const fn new() -> Self { + Self::with_mapper(()) + } +} + +impl RpcConverter { + /// Creates a new [`RpcConverter`] with `mapper`. + pub const fn with_mapper(mapper: Map) -> Self { + Self { phantom: PhantomData, mapper } + } + + /// Converts the generic types. + pub fn convert(self) -> RpcConverter { + RpcConverter::with_mapper(self.mapper) + } + + /// Swaps the inner `mapper`. + pub fn map(self, mapper: Map2) -> RpcConverter { + RpcConverter::with_mapper(mapper) + } + + /// Converts the generic types and swaps the inner `mapper`. + pub fn convert_map(self, mapper: Map2) -> RpcConverter { + self.convert().map(mapper) + } +} + +impl Clone for RpcConverter { + fn clone(&self) -> Self { + Self::with_mapper(self.mapper.clone()) + } +} + +impl Default for RpcConverter { + fn default() -> Self { + Self::new() + } +} + +impl TransactionCompat for RpcConverter +where + N: NodePrimitives, + E: Network + Unpin, + TxTy: IntoRpcTx<::TransactionResponse> + Clone + Debug, + TransactionRequest: TryIntoSimTx>, + Err: From + + for<'a> From<>>::Err> + + Error + + Unpin + + Sync + + Send + + Into>, + Map: for<'a> TxInfoMapper< + &'a TxTy, + Out = as IntoRpcTx<::TransactionResponse>>::TxInfo, + > + Clone + + Debug + + Unpin + + Send + + Sync, +{ + type Primitives = N; + type Transaction = ::TransactionResponse; + type Error = Err; + + fn fill( + &self, + tx: Recovered>, + tx_info: TransactionInfo, + ) -> Result { + let (tx, signer) = tx.into_parts(); + let tx_info = self.mapper.try_map(&tx, tx_info)?; + + Ok(tx.into_rpc_tx(signer, tx_info)) + } + + fn build_simulate_v1_transaction( + &self, + request: TransactionRequest, + ) -> Result, Self::Error> { + Ok(request.try_into_sim_tx().map_err(|e| TransactionConversionError(e.to_string()))?) 
+ } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index a3642aa16cf..e289c60a459 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -38,6 +38,7 @@ reth-rpc-server-types.workspace = true reth-network-types.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true +reth-trie-common.workspace = true # ethereum alloy-evm.workspace = true @@ -85,6 +86,7 @@ tracing.workspace = true tracing-futures.workspace = true futures.workspace = true serde.workspace = true +sha2.workspace = true thiserror.workspace = true derive_more.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 4823b854388..3f9518fd38c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -34,9 +34,10 @@ use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_storage_api::{ BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderBlock, ReceiptProviderIdExt, - StateProofProvider, StateProvider, StateProviderFactory, TransactionVariant, + StateProofProvider, StateProvider, StateProviderFactory, StateRootProvider, TransactionVariant, }; use reth_tasks::pool::BlockingTaskGuard; +use reth_trie_common::{updates::TrieUpdates, HashedPostState}; use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit}; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, @@ -528,7 +529,7 @@ where // Execute all transactions until index for tx in transactions { let tx_env = this.eth_api().evm_config().tx_env(tx); - let (res, _) = this.eth_api().transact(&mut db, evm_env.clone(), tx_env)?; + let res = this.eth_api().transact(&mut db, evm_env.clone(), tx_env)?; db.commit(res.state); } } @@ -863,6 +864,25 @@ where Ok((frame.into(), res.state)) } + + /// Returns the state root of the `HashedPostState` on top of the state for the given block with + /// trie updates. + async fn debug_state_root_with_updates( + &self, + hashed_state: HashedPostState, + block_id: Option, + ) -> Result<(B256, TrieUpdates), Eth::Error> { + self.inner + .eth_api + .spawn_blocking_io(move |this| { + let state = this + .provider() + .state_by_block_id(block_id.unwrap_or_default()) + .map_err(Eth::Error::from_eth_err)?; + state.state_root_with_updates(hashed_state).map_err(Eth::Error::from_eth_err) + }) + .await + } } #[async_trait] @@ -1218,6 +1238,14 @@ where Ok(()) } + async fn debug_state_root_with_updates( + &self, + hashed_state: HashedPostState, + block_id: Option, + ) -> RpcResult<(B256, TrieUpdates)> { + Self::debug_state_root_with_updates(self, hashed_state, block_id).await.map_err(Into::into) + } + async fn debug_stop_cpu_profile(&self) -> RpcResult<()> { Ok(()) } diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 83b9a074a15..dbc7af09d0b 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -1,9 +1,6 @@ //! 
`EthApiBuilder` implementation -use crate::{ - eth::{core::EthApiInner, EthTxBuilder}, - EthApi, -}; +use crate::{eth::core::EthApiInner, EthApi}; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; @@ -241,6 +238,6 @@ where + Unpin + 'static, { - EthApi { inner: Arc::new(self.build_inner()), tx_resp_builder: EthTxBuilder } + EthApi { inner: Arc::new(self.build_inner()), tx_resp_builder: Default::default() } } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index fe1f8bdcd4c..cf70176ebb5 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,7 +3,7 @@ use std::sync::Arc; -use crate::{eth::EthTxBuilder, EthApiBuilder}; +use crate::{eth::helpers::types::EthRpcConverter, EthApiBuilder}; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; @@ -65,7 +65,7 @@ pub struct EthApi { #[deref] pub(super) inner: Arc>, /// Transaction RPC response builder. - pub tx_resp_builder: EthTxBuilder, + pub tx_resp_builder: EthRpcConverter, } impl Clone for EthApi @@ -73,7 +73,7 @@ where Provider: BlockReader, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: EthTxBuilder } + Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } } } @@ -147,7 +147,7 @@ where proof_permits, ); - Self { inner: Arc::new(inner), tx_resp_builder: EthTxBuilder } + Self { inner: Arc::new(inner), tx_resp_builder: Default::default() } } } @@ -158,7 +158,7 @@ where { type Error = EthApiError; type NetworkTypes = Ethereum; - type TransactionCompat = EthTxBuilder; + type TransactionCompat = EthRpcConverter; fn tx_resp_builder(&self) -> &Self::TransactionCompat { &self.tx_resp_builder diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index d1b626bcaf8..1f67bee8958 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use futures::future::TryFutureExt; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; +use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_api::{ - EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcNodeCoreExt, - RpcTransaction, TransactionCompat, + EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcNodeCore, + RpcNodeCoreExt, RpcTransaction, TransactionCompat, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -21,7 +22,7 @@ use reth_rpc_eth_types::{ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, - ProviderReceipt, + ProviderReceipt, TransactionsProvider, }; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; @@ -292,7 +293,13 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt< + Provider: BlockIdReader, + Primitives: NodePrimitives< + SignedTx = <::Provider as TransactionsProvider>::Transaction, + >, + > + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -678,7 +685,7 @@ struct FullTransactionsReceiver { impl FullTransactionsReceiver where T: PoolTransaction + 
'static, - TxCompat: TransactionCompat, + TxCompat: TransactionCompat>, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { @@ -716,7 +723,7 @@ impl FullTransactionsFilter for FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat + 'static, + TxCompat: TransactionCompat> + 'static, { async fn drain(&self) -> FilterChanges { Self::drain(self).await diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 6304b73dcc1..0cb0b57a423 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -3,11 +3,12 @@ use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives_traits::BlockBody; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::{BlockBody, NodePrimitives}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, types::RpcTypes, - RpcNodeCoreExt, RpcReceipt, + RpcNodeCore, RpcNodeCoreExt, RpcReceipt, }; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use reth_storage_api::{BlockReader, ProviderTx}; @@ -77,7 +78,10 @@ where Pool: TransactionPool< Transaction: PoolTransaction>, >, + Primitives: NodePrimitives>, + Evm = EvmConfig, >, Provider: BlockReader, + EvmConfig: ConfigureEvm::Primitives>, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 53981868d6a..ab6adb53f39 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -9,15 +9,26 @@ use reth_evm::{ConfigureEvm, EvmEnv, EvmFactory, SpecFor}; use reth_node_api::NodePrimitives; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, + FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{revm_utils::CallFees, EthApiError, RpcInvalidTransactionError}; use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::{context::TxEnv, context_interface::Block, Database}; impl EthCall for EthApi where - Self: EstimateCall + LoadPendingBlock + FullEthApiTypes, + Self: EstimateCall + + LoadPendingBlock + + FullEthApiTypes + + RpcNodeCoreExt< + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, + Primitives: NodePrimitives>, + Evm = EvmConfig, + >, + EvmConfig: ConfigureEvm::Primitives>, Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index ad30c5f3da8..dac1ace7d82 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -38,14 +38,15 @@ where Transaction: PoolTransaction>, >, Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, + Primitives = ::Primitives, NextBlockEnvCtx = NextBlockEnvAttributes, >, + Primitives: NodePrimitives< + BlockHeader = ProviderHeader, + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + Block = ProviderBlock, + >, >, Provider: BlockReader< Block = 
reth_ethereum_primitives::Block, diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 1465e6e9eeb..90b6e6c9283 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,97 +1,25 @@ //! L1 `eth` API types. -use alloy_consensus::{SignableTransaction, Transaction as _, TxEnvelope}; -use alloy_network::{Ethereum, Network}; -use alloy_primitives::Signature; -use alloy_rpc_types::TransactionRequest; -use alloy_rpc_types_eth::{Transaction, TransactionInfo}; -use reth_ethereum_primitives::TransactionSigned; -use reth_primitives_traits::Recovered; -use reth_rpc_eth_api::EthApiTypes; +use alloy_network::Ethereum; +use reth_ethereum_primitives::EthPrimitives; use reth_rpc_eth_types::EthApiError; -use reth_rpc_types_compat::TransactionCompat; +use reth_rpc_types_compat::RpcConverter; -/// A standalone [`EthApiTypes`] implementation for Ethereum. -#[derive(Debug, Clone, Copy, Default)] -pub struct EthereumEthApiTypes(EthTxBuilder); - -impl EthApiTypes for EthereumEthApiTypes { - type Error = EthApiError; - type NetworkTypes = Ethereum; - type TransactionCompat = EthTxBuilder; - - fn tx_resp_builder(&self) -> &Self::TransactionCompat { - &self.0 - } -} - -/// Builds RPC transaction response for l1. -#[derive(Debug, Clone, Copy, Default)] -#[non_exhaustive] -pub struct EthTxBuilder; - -impl TransactionCompat for EthTxBuilder -where - Self: Send + Sync, -{ - type Transaction = ::TransactionResponse; - - type Error = EthApiError; - - fn fill( - &self, - tx: Recovered, - tx_info: TransactionInfo, - ) -> Result { - let tx = tx.convert::(); - - let TransactionInfo { - block_hash, block_number, index: transaction_index, base_fee, .. - } = tx_info; - - let effective_gas_price = base_fee - .map(|base_fee| { - tx.effective_tip_per_gas(base_fee).unwrap_or_default() + base_fee as u128 - }) - .unwrap_or_else(|| tx.max_fee_per_gas()); - - Ok(Transaction { - inner: tx, - block_hash, - block_number, - transaction_index, - effective_gas_price: Some(effective_gas_price), - }) - } - - fn build_simulate_v1_transaction( - &self, - request: TransactionRequest, - ) -> Result { - let Ok(tx) = request.build_typed_tx() else { - return Err(EthApiError::TransactionConversionError) - }; - let signature = Signature::new(Default::default(), Default::default(), false); - Ok(tx.into_signed(signature).into()) - } - - fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - let input = tx.inner.inner_mut().input_mut(); - *input = input.slice(..4); - } -} +/// An [`RpcConverter`] with its generics set to Ethereum specific. 
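+///
+/// Assuming the generic layout `RpcConverter<N, E, Err, Map = ()>` introduced in
+/// `rpc-types-compat`, this alias is expected to read as:
+/// ```ignore
+/// pub type EthRpcConverter = RpcConverter<EthPrimitives, Ethereum, EthApiError>;
+/// ```
+/// i.e. Ethereum primitives and network types, `EthApiError` as the error type, and
+/// the default unit mapper that passes `TransactionInfo` through unchanged.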
+pub type EthRpcConverter = RpcConverter; //tests for simulate #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxType; + use alloy_consensus::{Transaction, TxType}; + use alloy_rpc_types_eth::TransactionRequest; use reth_rpc_eth_types::simulate::resolve_transaction; use revm::database::CacheDB; #[test] fn test_resolve_transaction_empty_request() { - let builder = EthTxBuilder::default(); + let builder = EthRpcConverter::default(); let mut db = CacheDB::>::default(); let tx = TransactionRequest::default(); let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); @@ -106,7 +34,7 @@ mod tests { #[test] fn test_resolve_transaction_legacy() { let mut db = CacheDB::>::default(); - let builder = EthTxBuilder::default(); + let builder = EthRpcConverter::default(); let tx = TransactionRequest { gas_price: Some(100), ..Default::default() }; @@ -122,7 +50,7 @@ mod tests { #[test] fn test_resolve_transaction_partial_eip1559() { let mut db = CacheDB::>::default(); - let builder = EthTxBuilder::default(); + let builder = EthRpcConverter::default(); let tx = TransactionRequest { max_fee_per_gas: Some(200), diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index e6c844aa2f9..b4dca3b9f2b 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -15,9 +15,6 @@ pub use core::{EthApi, EthApiFor}; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::{ - signer::DevSigner, - types::{EthTxBuilder, EthereumEthApiTypes}, -}; +pub use helpers::signer::DevSigner; pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 4ddcd35d78d..b91318d498b 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -62,8 +62,11 @@ where Provider: BlockNumReader + CanonStateSubscriptions, Pool: TransactionPool, Network: NetworkInfo, - > + EthApiTypes>> - + 'static, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + Primitives: NodePrimitives>, + >, + > + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -94,7 +97,11 @@ where Provider: BlockNumReader + CanonStateSubscriptions, Pool: TransactionPool, Network: NetworkInfo, - > + EthApiTypes>>, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + Primitives: NodePrimitives>, + >, + >, { match kind { SubscriptionKind::NewHeads => { @@ -333,6 +340,7 @@ where let all_logs = logs_utils::matching_block_logs_with_tx_hashes( &filter, block_receipts.block, + block_receipts.timestamp, block_receipts.tx_receipts.iter().map(|(tx, receipt)| (*tx, receipt)), removed, ); diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index d0807216e1b..502a71ec6d4 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,5 +1,5 @@ -use alloy_consensus::{BlockHeader, Transaction, Typed2718}; -use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_consensus::{BlockHeader, Typed2718}; +use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types_eth::{BlockTransactions, TransactionReceipt}; @@ -15,7 +15,7 @@ use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, - FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, 
RpcTransaction, TransactionCompat, + FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{utils::binary_search, EthApiError}; use reth_rpc_server_types::result::internal_rpc_err; @@ -78,9 +78,9 @@ where /// Handler for `ots_getHeaderByNumber` and `erigon_getHeaderByNumber` async fn get_header_by_number( &self, - block_number: u64, + block_number: LenientBlockNumberOrTag, ) -> RpcResult>> { - self.eth.header_by_number(BlockNumberOrTag::Number(block_number)).await + self.eth.header_by_number(block_number.into()).await } /// Handler for `ots_hasCode` @@ -173,11 +173,11 @@ where /// Handler for `ots_getBlockDetails` async fn get_block_details( &self, - block_number: u64, + block_number: LenientBlockNumberOrTag, ) -> RpcResult>> { + let block_number = block_number.into_inner(); + let block = self.eth.block_by_number(block_number, true); let block_id = block_number.into(); - let block = self.eth.block_by_number(block_id, true); - let block_id = block_id.into(); let receipts = self.eth.block_receipts(block_id); let (block, receipts) = futures::try_join!(block, receipts)?; self.block_details( @@ -204,16 +204,16 @@ where /// Handler for `ots_getBlockTransactions` async fn get_block_transactions( &self, - block_number: u64, + block_number: LenientBlockNumberOrTag, page_number: usize, page_size: usize, ) -> RpcResult< OtsBlockTransactions, RpcHeader>, > { - let block_id = block_number.into(); + let block_number = block_number.into_inner(); // retrieve full block and its receipts - let block = self.eth.block_by_number(block_id, true); - let block_id = block_id.into(); + let block = self.eth.block_by_number(block_number, true); + let block_id = block_number.into(); let receipts = self.eth.block_receipts(block_id); let (block, receipts) = futures::try_join!(block, receipts)?; @@ -240,15 +240,6 @@ where // Crop transactions *transactions = transactions.drain(page_start..page_end).collect::>(); - // The input field returns only the 4 bytes method selector instead of the entire - // calldata byte blob - // See also: - for tx in transactions.iter_mut() { - if tx.input().len() > 4 { - Eth::TransactionCompat::otterscan_api_truncate_input(tx); - } - } - // Crop receipts and transform them into OtsTransactionReceipt let timestamp = Some(block.header.timestamp()); let receipts = receipts @@ -292,7 +283,7 @@ where async fn search_transactions_before( &self, _address: Address, - _block_number: u64, + _block_number: LenientBlockNumberOrTag, _page_size: usize, ) -> RpcResult { Err(internal_rpc_err("unimplemented")) @@ -302,7 +293,7 @@ where async fn search_transactions_after( &self, _address: Address, - _block_number: u64, + _block_number: LenientBlockNumberOrTag, _page_size: usize, ) -> RpcResult { Err(internal_rpc_err("unimplemented")) diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index b868d72d3a2..3aaa1ebc5e0 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -3,10 +3,15 @@ use std::{collections::HashMap, future::Future, sync::Arc}; use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use async_trait::async_trait; -use jsonrpsee::core::RpcResult; +use futures::StreamExt; +use jsonrpsee::{core::RpcResult, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink}; +use jsonrpsee_types::ErrorObject; +use reth_chain_state::{CanonStateNotificationStream, CanonStateSubscriptions}; use reth_errors::RethResult; +use reth_primitives_traits::NodePrimitives; use reth_rpc_api::RethApiServer; use 
reth_rpc_eth_types::{EthApiError, EthResult}; +use reth_rpc_server_types::result::internal_rpc_err; use reth_storage_api::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use tokio::sync::oneshot; @@ -89,7 +94,11 @@ where #[async_trait] impl RethApiServer for RethApi where - Provider: BlockReaderIdExt + ChangeSetReader + StateProviderFactory + 'static, + Provider: BlockReaderIdExt + + ChangeSetReader + + StateProviderFactory + + CanonStateSubscriptions + + 'static, { /// Handler for `reth_getBalanceChangesInBlock` async fn reth_get_balance_changes_in_block( @@ -98,6 +107,50 @@ where ) -> RpcResult> { Ok(Self::balance_changes_in_block(self, block_id).await?) } + + /// Handler for `reth_subscribeChainNotifications` + async fn reth_subscribe_chain_notifications( + &self, + pending: PendingSubscriptionSink, + ) -> jsonrpsee::core::SubscriptionResult { + let sink = pending.accept().await?; + let stream = self.provider().canonical_state_stream(); + self.inner.task_spawner.spawn(Box::pin(async move { + let _ = pipe_from_stream(sink, stream).await; + })); + + Ok(()) + } +} + +/// Pipes all stream items to the subscription sink. +async fn pipe_from_stream( + sink: SubscriptionSink, + mut stream: CanonStateNotificationStream, +) -> Result<(), ErrorObject<'static>> { + loop { + tokio::select! { + _ = sink.closed() => { + // connection dropped + break Ok(()) + } + maybe_item = stream.next() => { + let item = match maybe_item { + Some(item) => item, + None => { + // stream ended + break Ok(()) + }, + }; + let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item) + .map_err(|e| internal_rpc_err(e.to_string()))?; + + if sink.send(msg).await.is_err() { + break Ok(()); + } + } + } + } } impl std::fmt::Debug for RethApi { diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 7facf6dd91e..8c69aaf7e0b 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_txpool::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; +use reth_primitives_traits::NodePrimitives; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::TransactionCompat; use reth_transaction_pool::{ @@ -35,7 +36,7 @@ impl TxPoolApi { impl TxPoolApi where Pool: TransactionPool> + 'static, - Eth: TransactionCompat>, + Eth: TransactionCompat>>, { fn content(&self) -> Result, Eth::Error> { #[inline] @@ -46,7 +47,7 @@ where ) -> Result<(), RpcTxB::Error> where Tx: PoolTransaction, - RpcTxB: TransactionCompat, + RpcTxB: TransactionCompat>, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), @@ -74,7 +75,7 @@ where impl TxPoolApiServer for TxPoolApi where Pool: TransactionPool> + 'static, - Eth: TransactionCompat> + 'static, + Eth: TransactionCompat>> + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
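Aside on the `reth_subscribeChainNotifications` handler added above in `crates/rpc/rpc/src/reth.rs`: `pipe_from_stream` follows the usual jsonrpsee pattern of racing the sink's `closed()` future against the next stream item. A self-contained sketch of the same loop, generalized over any serializable stream (the helper name and bounds here are illustrative, not part of this diff):

```rust
use futures::{Stream, StreamExt};
use jsonrpsee::{SubscriptionMessage, SubscriptionSink};
use serde::Serialize;

/// Forwards every item of `stream` to `sink` until either side goes away.
/// Mirrors the `pipe_from_stream` added in this diff, over a generic stream type.
async fn pipe<S, T>(sink: SubscriptionSink, mut stream: S)
where
    S: Stream<Item = T> + Unpin,
    T: Serialize,
{
    loop {
        tokio::select! {
            // Subscriber disconnected: stop forwarding.
            _ = sink.closed() => break,
            item = stream.next() => {
                let Some(item) = item else { break }; // stream exhausted
                let Ok(msg) = SubscriptionMessage::new(
                    sink.method_name(),
                    sink.subscription_id(),
                    &item,
                ) else { break }; // serialization failed
                if sink.send(msg).await.is_err() {
                    break; // send failed, subscriber gone
                }
            }
        }
    }
}
```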
diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index d21e8f13e74..cb64e10e047 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -20,7 +20,11 @@ use reth_engine_primitives::PayloadValidator; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_execution_types::BlockExecutionOutput; -use reth_metrics::{metrics, metrics::Gauge, Metrics}; +use reth_metrics::{ + metrics, + metrics::{gauge, Gauge}, + Metrics, +}; use reth_node_api::NewPayloadError; use reth_primitives_traits::{ constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, @@ -33,6 +37,7 @@ use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{oneshot, RwLock}; use tracing::warn; @@ -77,6 +82,11 @@ where }); inner.metrics.disallow_size.set(inner.disallow.len() as f64); + + let disallow_hash = hash_disallow_list(&inner.disallow); + let hash_gauge = gauge!("builder_validation_disallow_hash", "hash" => disallow_hash); + hash_gauge.set(1.0); + Self { inner } } @@ -229,7 +239,7 @@ where expected: header.gas_limit(), })) } else if header.gas_used() != message.gas_used { - return Err(ValidationApiError::GasUsedMismatch(GotExpected { + Err(ValidationApiError::GasUsedMismatch(GotExpected { got: message.gas_used, expected: header.gas_used(), })) @@ -498,6 +508,22 @@ pub struct ValidationApiInner { metrics: ValidationMetrics, } +/// Calculates a deterministic hash of the blocklist for change detection. +/// +/// This function sorts addresses to ensure deterministic output regardless of +/// insertion order, then computes a SHA256 hash of the concatenated addresses. +fn hash_disallow_list(disallow: &HashSet
<Address>
) -> String { + let mut sorted: Vec<_> = disallow.iter().collect(); + sorted.sort(); // sort for deterministic hashing + + let mut hasher = Sha256::new(); + for addr in sorted { + hasher.update(addr.as_slice()); + } + + format!("{:x}", hasher.finalize()) +} + impl fmt::Debug for ValidationApiInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidationApiInner").finish_non_exhaustive() @@ -597,3 +623,63 @@ pub(crate) struct ValidationMetrics { /// The number of entries configured in the builder validation disallow list. pub(crate) disallow_size: Gauge, } + +#[cfg(test)] +mod tests { + use super::hash_disallow_list; + use revm_primitives::Address; + use std::collections::HashSet; + + #[test] + fn test_hash_disallow_list_deterministic() { + let mut addresses = HashSet::new(); + addresses.insert(Address::from([1u8; 20])); + addresses.insert(Address::from([2u8; 20])); + + let hash1 = hash_disallow_list(&addresses); + let hash2 = hash_disallow_list(&addresses); + + assert_eq!(hash1, hash2); + } + + #[test] + fn test_hash_disallow_list_different_content() { + let mut addresses1 = HashSet::new(); + addresses1.insert(Address::from([1u8; 20])); + + let mut addresses2 = HashSet::new(); + addresses2.insert(Address::from([2u8; 20])); + + let hash1 = hash_disallow_list(&addresses1); + let hash2 = hash_disallow_list(&addresses2); + + assert_ne!(hash1, hash2); + } + + #[test] + fn test_hash_disallow_list_order_independent() { + let mut addresses1 = HashSet::new(); + addresses1.insert(Address::from([1u8; 20])); + addresses1.insert(Address::from([2u8; 20])); + + let mut addresses2 = HashSet::new(); + addresses2.insert(Address::from([2u8; 20])); // Different insertion order + addresses2.insert(Address::from([1u8; 20])); + + let hash1 = hash_disallow_list(&addresses1); + let hash2 = hash_disallow_list(&addresses2); + + assert_eq!(hash1, hash2); + } + + #[test] + //ensures parity with rbuilder hashing https://github.com/flashbots/rbuilder/blob/962c8444cdd490a216beda22c7eec164db9fc3ac/crates/rbuilder/src/live_builder/block_list_provider.rs#L248 + fn test_disallow_list_hash_rbuilder_parity() { + let json = r#"["0x05E0b5B40B7b66098C2161A5EE11C5740A3A7C45","0x01e2919679362dFBC9ee1644Ba9C6da6D6245BB1","0x03893a7c7463AE47D46bc7f091665f1893656003","0x04DBA1194ee10112fE6C3207C0687DEf0e78baCf"]"#; + let blocklist: Vec
<Address> = serde_json::from_str(json).unwrap(); + let blocklist: HashSet<Address>
= blocklist.into_iter().collect(); + let expected_hash = "ee14e9d115e182f61871a5a385ab2f32ecf434f3b17bdbacc71044810d89e608"; + let hash = hash_disallow_list(&blocklist); + assert_eq!(expected_hash, hash); + } +} diff --git a/crates/scroll/alloy/consensus/Cargo.toml b/crates/scroll/alloy/consensus/Cargo.toml index 6c45faeee8f..a8f43962645 100644 --- a/crates/scroll/alloy/consensus/Cargo.toml +++ b/crates/scroll/alloy/consensus/Cargo.toml @@ -12,7 +12,7 @@ exclude.workspace = true workspace = true [dependencies] -# Alloy +# alloy alloy-rlp.workspace = true alloy-eips.workspace = true alloy-consensus.workspace = true @@ -20,6 +20,7 @@ alloy-primitives = { workspace = true, features = ["rlp"] } # misc derive_more = { workspace = true, features = ["display"] } +serde_with = { workspace = true, optional = true } # arbitrary arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -62,6 +63,7 @@ std = [ "rand/std", "derive_more/std", "serde_json/std", + "serde_with?/std", ] k256 = [ "alloy-primitives/k256", @@ -98,6 +100,7 @@ serde = [ "reth-codecs?/serde", ] serde-bincode-compat = [ + "dep:serde_with", "alloy-consensus/serde-bincode-compat", "alloy-eips/serde-bincode-compat", ] diff --git a/crates/scroll/alloy/consensus/src/lib.rs b/crates/scroll/alloy/consensus/src/lib.rs index 1f3d755906a..c6eb0dad6ea 100644 --- a/crates/scroll/alloy/consensus/src/lib.rs +++ b/crates/scroll/alloy/consensus/src/lib.rs @@ -12,8 +12,9 @@ extern crate alloc as std; mod transaction; pub use transaction::{ - ScrollL1MessageTransactionFields, ScrollPooledTransaction, ScrollTxEnvelope, ScrollTxType, - ScrollTypedTransaction, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE, L1_MESSAGE_TX_TYPE_ID, + ScrollAdditionalInfo, ScrollL1MessageTransactionFields, ScrollPooledTransaction, + ScrollTransactionInfo, ScrollTxEnvelope, ScrollTxType, ScrollTypedTransaction, TxL1Message, + L1_MESSAGE_TRANSACTION_TYPE, L1_MESSAGE_TX_TYPE_ID, }; mod receipt; @@ -21,3 +22,9 @@ pub use receipt::{ScrollReceiptEnvelope, ScrollReceiptWithBloom, ScrollTransacti #[cfg(feature = "serde")] pub use transaction::serde_l1_message_tx_rpc; + +/// Bincode-compatible serde implementations. 
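+/// Bincode is not a self-describing format, so the envelope's serde
+/// representation (which appears to round-trip through an untagged enum, see
+/// `serde_from` in `envelope.rs`) cannot be used with it directly; these mirror
+/// types provide a bincode-safe encoding, following the `serde-bincode-compat`
+/// convention used upstream in alloy.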
+#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::transaction::serde_bincode_compat::*; +} diff --git a/crates/scroll/alloy/consensus/src/transaction/envelope.rs b/crates/scroll/alloy/consensus/src/transaction/envelope.rs index 419925e4dfd..4fd4b8d1e2b 100644 --- a/crates/scroll/alloy/consensus/src/transaction/envelope.rs +++ b/crates/scroll/alloy/consensus/src/transaction/envelope.rs @@ -1,6 +1,9 @@ +use crate::{ScrollPooledTransaction, ScrollTxType, ScrollTypedTransaction, TxL1Message}; +use core::hash::Hash; + use alloy_consensus::{ - transaction::RlpEcdsaDecodableTx, Sealable, Sealed, Signed, Transaction, TxEip1559, TxEip2930, - TxEip7702, TxLegacy, Typed2718, + error::ValueError, transaction::RlpEcdsaDecodableTx, Sealable, Sealed, Signed, Transaction, + TxEip1559, TxEip2930, TxEip7702, TxLegacy, Typed2718, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -9,8 +12,12 @@ use alloy_eips::{ }; use alloy_primitives::{Address, Bytes, Signature, TxKind, B256, U256}; use alloy_rlp::{Decodable, Encodable}; - -use crate::{ScrollTxType, TxL1Message}; +#[cfg(feature = "reth-codec")] +use reth_codecs::{ + Compact, + __private::bytes::BufMut, + alloy::transaction::{CompactEnvelope, Envelope, FromTxCompact, ToTxCompact}, +}; /// The Ethereum [EIP-2718] Transaction Envelope, modified for Scroll chains. /// @@ -23,14 +30,13 @@ use crate::{ScrollTxType, TxL1Message}; /// flag. /// /// [EIP-2718]: https://eips.ethereum.org/EIPS/eip-2718 -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "serde", serde(into = "serde_from::TaggedTxEnvelope", from = "serde_from::MaybeTaggedTxEnvelope") )] #[cfg_attr(all(any(test, feature = "arbitrary"), feature = "k256"), derive(arbitrary::Arbitrary))] -#[non_exhaustive] pub enum ScrollTxEnvelope { /// An untagged [`TxLegacy`]. Legacy(Signed), @@ -80,6 +86,33 @@ impl From> for ScrollTxEnvelope { } } +impl From> for ScrollTxEnvelope { + fn from(value: Signed) -> Self { + let (tx, sig, hash) = value.into_parts(); + match tx { + ScrollTypedTransaction::Legacy(tx_legacy) => { + let tx = Signed::new_unchecked(tx_legacy, sig, hash); + Self::Legacy(tx) + } + ScrollTypedTransaction::Eip2930(tx_eip2930) => { + let tx = Signed::new_unchecked(tx_eip2930, sig, hash); + Self::Eip2930(tx) + } + ScrollTypedTransaction::Eip1559(tx_eip1559) => { + let tx = Signed::new_unchecked(tx_eip1559, sig, hash); + Self::Eip1559(tx) + } + ScrollTypedTransaction::Eip7702(tx_eip7702) => { + let tx = Signed::new_unchecked(tx_eip7702, sig, hash); + Self::Eip7702(tx) + } + ScrollTypedTransaction::L1Message(tx) => { + Self::L1Message(Sealed::new_unchecked(tx, hash)) + } + } + } +} + impl Typed2718 for ScrollTxEnvelope { fn ty(&self) -> u8 { match self { @@ -377,6 +410,100 @@ impl ScrollTxEnvelope { Self::L1Message(_) => None, } } + + /// Converts the [`ScrollTxEnvelope`] into a [`ScrollPooledTransaction`], returns an error if + /// the transaction is a L1 message. 
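+    ///
+    /// Sketch of the intended call site (`envelope` and `broadcast` are
+    /// illustrative, and `ValueError::into_value` is assumed to hand back the
+    /// rejected envelope):
+    /// ```ignore
+    /// match envelope.try_into_pooled() {
+    ///     Ok(pooled) => broadcast(pooled),
+    ///     // L1 messages are system transactions and never enter the pool.
+    ///     Err(err) => drop(err.into_value()),
+    /// }
+    /// ```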
+ pub fn try_into_pooled(self) -> Result> { + match self { + Self::Legacy(tx) => Ok(tx.into()), + Self::Eip2930(tx) => Ok(tx.into()), + Self::Eip1559(tx) => Ok(tx.into()), + Self::Eip7702(tx) => Ok(tx.into()), + Self::L1Message(tx) => Err(ValueError::new(tx.into(), "L1 messages cannot be pooled")), + } + } +} + +#[cfg(feature = "reth-codec")] +impl ToTxCompact for ScrollTxEnvelope { + fn to_tx_compact(&self, buf: &mut (impl BufMut + AsMut<[u8]>)) { + match self { + Self::Legacy(tx) => tx.tx().to_compact(buf), + Self::Eip2930(tx) => tx.tx().to_compact(buf), + Self::Eip1559(tx) => tx.tx().to_compact(buf), + Self::Eip7702(tx) => tx.tx().to_compact(buf), + Self::L1Message(tx) => tx.to_compact(buf), + }; + } +} + +#[cfg(feature = "reth-codec")] +impl FromTxCompact for ScrollTxEnvelope { + type TxType = ScrollTxType; + + fn from_tx_compact(buf: &[u8], tx_type: ScrollTxType, signature: Signature) -> (Self, &[u8]) { + match tx_type { + ScrollTxType::Legacy => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Legacy(tx), buf) + } + ScrollTxType::Eip2930 => { + let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip2930(tx), buf) + } + ScrollTxType::Eip1559 => { + let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip1559(tx), buf) + } + ScrollTxType::Eip7702 => { + let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip7702(tx), buf) + } + ScrollTxType::L1Message => { + let (tx, buf) = TxL1Message::from_compact(buf, buf.len()); + let tx = Sealed::new(tx); + (Self::L1Message(tx), buf) + } + } + } +} + +#[cfg(feature = "reth-codec")] +const L1_MESSAGE_SIGNATURE: Signature = Signature::new(U256::ZERO, U256::ZERO, false); + +#[cfg(feature = "reth-codec")] +impl Envelope for ScrollTxEnvelope { + fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::L1Message(_) => &L1_MESSAGE_SIGNATURE, + } + } + + fn tx_type(&self) -> Self::TxType { + Self::tx_type(self) + } +} + +#[cfg(feature = "reth-codec")] +impl Compact for ScrollTxEnvelope { + fn to_compact(&self, buf: &mut B) -> usize + where + B: BufMut + AsMut<[u8]>, + { + CompactEnvelope::to_compact(self, buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + CompactEnvelope::from_compact(buf, len) + } } impl Encodable for ScrollTxEnvelope { @@ -459,6 +586,33 @@ impl Encodable2718 for ScrollTxEnvelope { } } +#[cfg(feature = "k256")] +impl alloy_consensus::transaction::SignerRecoverable for ScrollTxEnvelope { + fn recover_signer(&self) -> Result { + let signature_hash = match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::L1Message(tx) => return Ok(tx.sender), + }; + let signature = self.signature().expect("handled L1 message in previous match"); + alloy_consensus::crypto::secp256k1::recover_signer(&signature, signature_hash) + } + + fn recover_signer_unchecked(&self) -> Result { + let signature_hash = match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + 
Self::L1Message(tx) => return Ok(tx.sender), + }; + let signature = self.signature().expect("handled L1 message in previous match"); + alloy_consensus::crypto::secp256k1::recover_signer_unchecked(&signature, signature_hash) + } +} + #[cfg(feature = "serde")] mod serde_from { //! NB: Why do we need this? @@ -537,6 +691,166 @@ mod serde_from { } } +/// Bincode-compatible serde implementation for `ScrollTxEnvelope`. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + use crate::TxL1Message; + + use alloy_consensus::{ + transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, + Sealed, Signed, + }; + use alloy_primitives::{Signature, B256}; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible representation of an `ScrollTxEnvelope`. + #[derive(Debug, Serialize, Deserialize)] + pub enum ScrollTxEnvelope<'a> { + /// Legacy variant. + Legacy { + /// Transaction signature. + signature: Signature, + /// Borrowed legacy transaction data. + transaction: TxLegacy<'a>, + }, + /// EIP-2930 variant. + Eip2930 { + /// Transaction signature. + signature: Signature, + /// Borrowed EIP-2930 transaction data. + transaction: TxEip2930<'a>, + }, + /// EIP-1559 variant. + Eip1559 { + /// Transaction signature. + signature: Signature, + /// Borrowed EIP-1559 transaction data. + transaction: TxEip1559<'a>, + }, + /// EIP-7702 variant. + Eip7702 { + /// Transaction signature. + signature: Signature, + /// Borrowed EIP-7702 transaction data. + transaction: TxEip7702<'a>, + }, + /// L1 message variant. + TxL1Message { + /// Precomputed hash. + hash: B256, + /// Borrowed deposit transaction data. + transaction: TxL1Message, + }, + } + + impl<'a> From<&'a super::ScrollTxEnvelope> for ScrollTxEnvelope<'a> { + fn from(value: &'a super::ScrollTxEnvelope) -> Self { + match value { + super::ScrollTxEnvelope::Legacy(signed_legacy) => Self::Legacy { + signature: *signed_legacy.signature(), + transaction: signed_legacy.tx().into(), + }, + super::ScrollTxEnvelope::Eip2930(signed_2930) => Self::Eip2930 { + signature: *signed_2930.signature(), + transaction: signed_2930.tx().into(), + }, + super::ScrollTxEnvelope::Eip1559(signed_1559) => Self::Eip1559 { + signature: *signed_1559.signature(), + transaction: signed_1559.tx().into(), + }, + super::ScrollTxEnvelope::Eip7702(signed_7702) => Self::Eip7702 { + signature: *signed_7702.signature(), + transaction: signed_7702.tx().into(), + }, + super::ScrollTxEnvelope::L1Message(sealed_l1_message) => Self::TxL1Message { + hash: sealed_l1_message.seal(), + transaction: sealed_l1_message.inner().clone(), + }, + } + } + } + + impl<'a> From> for super::ScrollTxEnvelope { + fn from(value: ScrollTxEnvelope<'a>) -> Self { + match value { + ScrollTxEnvelope::Legacy { signature, transaction } => { + Self::Legacy(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip2930 { signature, transaction } => { + Self::Eip2930(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip1559 { signature, transaction } => { + Self::Eip1559(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip7702 { signature, transaction } => { + Self::Eip7702(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::TxL1Message { hash, transaction } => { + Self::L1Message(Sealed::new_unchecked(transaction, hash)) + } + } + } + } + + impl SerializeAs for 
ScrollTxEnvelope<'_> { + fn serialize_as( + source: &super::ScrollTxEnvelope, + serializer: S, + ) -> Result + where + S: Serializer, + { + let borrowed = ScrollTxEnvelope::from(source); + borrowed.serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::ScrollTxEnvelope> for ScrollTxEnvelope<'de> { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let borrowed = ScrollTxEnvelope::deserialize(deserializer)?; + Ok(borrowed.into()) + } + } + + #[cfg(test)] + mod tests { + use super::*; + use arbitrary::Arbitrary; + use rand::Rng; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + /// Tests a bincode round-trip for `ScrollTxEnvelope` using an arbitrary instance. + #[test] + fn test_scroll_tx_envelope_bincode_roundtrip_arbitrary() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + // Use the bincode-compatible representation defined in this module. + #[serde_as(as = "ScrollTxEnvelope<'_>")] + envelope: super::super::ScrollTxEnvelope, + } + + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let data = Data { + envelope: super::super::ScrollTxEnvelope::arbitrary( + &mut arbitrary::Unstructured::new(&bytes), + ) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + #[cfg(test)] mod tests { extern crate alloc; diff --git a/crates/scroll/alloy/consensus/src/transaction/l1_message.rs b/crates/scroll/alloy/consensus/src/transaction/l1_message.rs index 40eb8e4a121..0c200904fbc 100644 --- a/crates/scroll/alloy/consensus/src/transaction/l1_message.rs +++ b/crates/scroll/alloy/consensus/src/transaction/l1_message.rs @@ -101,7 +101,7 @@ impl TxL1Message { } /// Outputs the length of the transaction's fields, without a RLP header. - fn rlp_encoded_fields_length(&self) -> usize { + pub fn rlp_encoded_fields_length(&self) -> usize { self.queue_index.length() + self.gas_limit.length() + self.to.length() + @@ -112,7 +112,7 @@ impl TxL1Message { /// Encode the fields of the transaction without a RLP header. /// - fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) { + pub fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) { self.queue_index.encode(out); self.gas_limit.encode(out); self.to.encode(out); diff --git a/crates/scroll/alloy/consensus/src/transaction/meta.rs b/crates/scroll/alloy/consensus/src/transaction/meta.rs new file mode 100644 index 00000000000..d1ffc5f5acd --- /dev/null +++ b/crates/scroll/alloy/consensus/src/transaction/meta.rs @@ -0,0 +1,28 @@ +use alloy_consensus::transaction::TransactionInfo; +use alloy_primitives::U256; + +/// Additional receipt metadata required for Scroll transactions. +/// +/// These fields are used to provide additional context for in RPC responses. +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] +pub struct ScrollAdditionalInfo { + /// Only present in RPC responses. + pub l1_fee: U256, +} + +/// Additional fields in the context of a block that contains this transaction. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct ScrollTransactionInfo { + /// Additional transaction information. + pub inner: TransactionInfo, + /// Additional metadata for Scroll. + pub additional_info: ScrollAdditionalInfo, +} + +impl ScrollTransactionInfo { + /// Creates a new [`ScrollTransactionInfo`] with the given [`TransactionInfo`] and + /// [`ScrollAdditionalInfo`]. 
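+    ///
+    /// For example (values illustrative, assuming `U256` is in scope):
+    /// ```ignore
+    /// let info = ScrollTransactionInfo::new(
+    ///     TransactionInfo::default(),
+    ///     ScrollAdditionalInfo { l1_fee: U256::from(1000u64) },
+    /// );
+    /// assert_eq!(info.additional_info.l1_fee, U256::from(1000u64));
+    /// ```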
+ pub const fn new(inner: TransactionInfo, additional_info: ScrollAdditionalInfo) -> Self { + Self { inner, additional_info } + } +} diff --git a/crates/scroll/alloy/consensus/src/transaction/mod.rs b/crates/scroll/alloy/consensus/src/transaction/mod.rs index ce98f42336f..c43aab42897 100644 --- a/crates/scroll/alloy/consensus/src/transaction/mod.rs +++ b/crates/scroll/alloy/consensus/src/transaction/mod.rs @@ -9,6 +9,9 @@ pub use envelope::ScrollTxEnvelope; mod l1_message; pub use l1_message::{ScrollL1MessageTransactionFields, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE}; +mod meta; +pub use meta::{ScrollAdditionalInfo, ScrollTransactionInfo}; + mod typed; pub use typed::ScrollTypedTransaction; @@ -17,3 +20,9 @@ pub use pooled::ScrollPooledTransaction; #[cfg(feature = "serde")] pub use l1_message::serde_l1_message_tx_rpc; + +/// Bincode-compatible serde implementations for transaction types. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + pub use super::envelope::serde_bincode_compat::*; +} diff --git a/crates/scroll/alloy/consensus/src/transaction/pooled.rs b/crates/scroll/alloy/consensus/src/transaction/pooled.rs index 309baa570b3..f59dca54ee2 100644 --- a/crates/scroll/alloy/consensus/src/transaction/pooled.rs +++ b/crates/scroll/alloy/consensus/src/transaction/pooled.rs @@ -3,6 +3,7 @@ use crate::{ScrollTxEnvelope, ScrollTxType}; use alloy_consensus::{ + error::ValueError, transaction::{RlpEcdsaDecodableTx, TxEip1559, TxEip2930, TxLegacy}, SignableTransaction, Signed, Transaction, TxEip7702, TxEnvelope, Typed2718, }; @@ -441,6 +442,14 @@ impl From for ScrollTxEnvelope { } } +impl TryFrom for ScrollPooledTransaction { + type Error = ValueError; + + fn try_from(value: ScrollTxEnvelope) -> Result { + value.try_into_pooled() + } +} + #[cfg(feature = "k256")] impl alloy_consensus::transaction::SignerRecoverable for ScrollPooledTransaction { fn recover_signer( diff --git a/crates/scroll/alloy/consensus/src/transaction/typed.rs b/crates/scroll/alloy/consensus/src/transaction/typed.rs index 6ea71853619..9bd6a4cb76c 100644 --- a/crates/scroll/alloy/consensus/src/transaction/typed.rs +++ b/crates/scroll/alloy/consensus/src/transaction/typed.rs @@ -1,9 +1,10 @@ use crate::{ScrollTxEnvelope, ScrollTxType, TxL1Message}; use alloy_consensus::{ - SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, TxLegacy, Typed2718, + transaction::RlpEcdsaEncodableTx, SignableTransaction, Signed, Transaction, TxEip1559, + TxEip2930, TxEip7702, TxLegacy, Typed2718, }; -use alloy_eips::eip2930::AccessList; -use alloy_primitives::{Address, Bytes, TxKind, B256}; +use alloy_eips::{eip2930::AccessList, Encodable2718}; +use alloy_primitives::{bytes::BufMut, Address, Bytes, ChainId, Signature, TxHash, TxKind, B256}; #[cfg(feature = "reth-codec")] use { reth_codecs::{Compact, __private::bytes}, @@ -342,6 +343,128 @@ impl Transaction for ScrollTypedTransaction { } } +impl RlpEcdsaEncodableTx for ScrollTypedTransaction { + fn rlp_encoded_fields_length(&self) -> usize { + match self { + Self::Legacy(tx) => tx.rlp_encoded_fields_length(), + Self::Eip2930(tx) => tx.rlp_encoded_fields_length(), + Self::Eip1559(tx) => tx.rlp_encoded_fields_length(), + Self::Eip7702(tx) => tx.rlp_encoded_fields_length(), + Self::L1Message(tx) => tx.rlp_encoded_fields_length(), + } + } + + fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) { + match self { + Self::Legacy(tx) => tx.rlp_encode_fields(out), + Self::Eip2930(tx) => tx.rlp_encode_fields(out), + 
Self::Eip1559(tx) => tx.rlp_encode_fields(out), + Self::Eip7702(tx) => tx.rlp_encode_fields(out), + Self::L1Message(tx) => tx.rlp_encode_fields(out), + } + } + + fn eip2718_encode_with_type(&self, signature: &Signature, _ty: u8, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip2930(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip1559(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip7702(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::L1Message(tx) => tx.encode_2718(out), + } + } + + fn eip2718_encode(&self, signature: &Signature, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.eip2718_encode(signature, out), + Self::Eip2930(tx) => tx.eip2718_encode(signature, out), + Self::Eip1559(tx) => tx.eip2718_encode(signature, out), + Self::Eip7702(tx) => tx.eip2718_encode(signature, out), + Self::L1Message(tx) => tx.encode_2718(out), + } + } + + fn network_encode_with_type(&self, signature: &Signature, _ty: u8, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip2930(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip1559(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip7702(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::L1Message(tx) => tx.network_encode(out), + } + } + + fn network_encode(&self, signature: &Signature, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.network_encode(signature, out), + Self::Eip2930(tx) => tx.network_encode(signature, out), + Self::Eip1559(tx) => tx.network_encode(signature, out), + Self::Eip7702(tx) => tx.network_encode(signature, out), + Self::L1Message(tx) => tx.network_encode(out), + } + } + + fn tx_hash_with_type(&self, signature: &Signature, _ty: u8) -> TxHash { + match self { + Self::Legacy(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip2930(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip1559(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip7702(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::L1Message(tx) => tx.tx_hash(), + } + } + + fn tx_hash(&self, signature: &Signature) -> TxHash { + match self { + Self::Legacy(tx) => tx.tx_hash(signature), + Self::Eip2930(tx) => tx.tx_hash(signature), + Self::Eip1559(tx) => tx.tx_hash(signature), + Self::Eip7702(tx) => tx.tx_hash(signature), + Self::L1Message(tx) => tx.tx_hash(), + } + } +} + +impl SignableTransaction for ScrollTypedTransaction { + fn set_chain_id(&mut self, chain_id: ChainId) { + match self { + Self::Legacy(tx) => tx.set_chain_id(chain_id), + Self::Eip2930(tx) => tx.set_chain_id(chain_id), + Self::Eip1559(tx) => tx.set_chain_id(chain_id), + Self::Eip7702(tx) => tx.set_chain_id(chain_id), + Self::L1Message(_) => {} + } + } + + fn encode_for_signing(&self, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.encode_for_signing(out), + Self::Eip2930(tx) => tx.encode_for_signing(out), + Self::Eip1559(tx) => tx.encode_for_signing(out), + Self::Eip7702(tx) => tx.encode_for_signing(out), + Self::L1Message(_) => {} + } + } + + fn payload_len_for_signature(&self) -> usize { + match self { + Self::Legacy(tx) => tx.payload_len_for_signature(), + Self::Eip2930(tx) => tx.payload_len_for_signature(), + Self::Eip1559(tx) => tx.payload_len_for_signature(), + Self::Eip7702(tx) => tx.payload_len_for_signature(), + Self::L1Message(_) => 0, + } + } + + fn 
into_signed(self, signature: Signature) -> Signed + where + Self: Sized, + { + let hash = self.tx_hash(&signature); + Signed::new_unchecked(self, signature, hash) + } +} + #[cfg(feature = "reth-codec")] impl Compact for ScrollTypedTransaction { fn to_compact(&self, out: &mut B) -> usize diff --git a/crates/scroll/alloy/evm/src/tx.rs b/crates/scroll/alloy/evm/src/tx.rs index 3d68b7a1c4b..87174d5522c 100644 --- a/crates/scroll/alloy/evm/src/tx.rs +++ b/crates/scroll/alloy/evm/src/tx.rs @@ -1,8 +1,15 @@ -use alloy_evm::IntoTxEnv; +use alloy_consensus::crypto::secp256k1::recover_signer; +use alloy_eips::{Encodable2718, Typed2718}; +use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use core::ops::{Deref, DerefMut}; -use revm::context::Transaction; +use revm::context::{ + either::Either, + transaction::{RecoveredAuthority, RecoveredAuthorization}, + Transaction, TxEnv, +}; use revm_scroll::ScrollTransaction; +use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE}; /// This structure wraps around a [`ScrollTransaction`] and allows us to implement the [`IntoTxEnv`] /// trait. This can be removed when the interface is improved. Without this wrapper, we would need @@ -124,3 +131,136 @@ impl Transaction for ScrollTransactionIntoTxEnv { self.0.max_priority_fee_per_gas() } } + +impl FromTxWithEncoded for ScrollTransactionIntoTxEnv { + fn from_encoded_tx(tx: &ScrollTxEnvelope, caller: Address, encoded: Bytes) -> Self { + let base = match &tx { + ScrollTxEnvelope::Legacy(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip2930(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip1559(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip7702(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::L1Message(tx) => { + let TxL1Message { to, value, gas_limit, input, queue_index: _, sender: _ } = &**tx; + TxEnv { + tx_type: tx.ty(), + caller, + gas_limit: *gas_limit, + kind: TxKind::Call(*to), + value: *value, + data: input.clone(), + ..Default::default() + } + } + }; + + let encoded = (!tx.is_l1_message()).then_some(encoded); + Self::new(base, encoded) + } +} + +impl FromRecoveredTx for ScrollTransactionIntoTxEnv { + fn from_recovered_tx(tx: &ScrollTxEnvelope, sender: Address) -> Self { + let envelope = tx.encoded_2718(); + + let base = match &tx { + ScrollTxEnvelope::Legacy(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().gas_price, + gas_priority_fee: None, + kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: tx.tx().chain_id, + nonce: tx.tx().nonce, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 0, + caller: sender, + }, + ScrollTxEnvelope::Eip2930(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().gas_price, + gas_priority_fee: None, + kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 1, + caller: sender, + }, + ScrollTxEnvelope::Eip1559(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().max_fee_per_gas, + gas_priority_fee: Some(tx.tx().max_priority_fee_per_gas), + 
kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 2, + caller: sender, + }, + ScrollTxEnvelope::Eip7702(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().max_fee_per_gas, + gas_priority_fee: Some(tx.tx().max_priority_fee_per_gas), + kind: tx.tx().to.into(), + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: tx + .tx() + .authorization_list + .iter() + .map(|auth| { + Either::Right(RecoveredAuthorization::new_unchecked( + auth.inner().clone(), + auth.signature() + .ok() + .and_then(|signature| { + recover_signer(&signature, auth.signature_hash()).ok() + }) + .map_or(RecoveredAuthority::Invalid, RecoveredAuthority::Valid), + )) + }) + .collect(), + tx_type: 4, + caller: sender, + }, + ScrollTxEnvelope::L1Message(tx) => TxEnv { + gas_limit: tx.gas_limit, + gas_price: 0, + gas_priority_fee: None, + kind: TxKind::Call(tx.to), + value: tx.value, + data: tx.input.clone(), + chain_id: None, + nonce: 0, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: L1_MESSAGE_TRANSACTION_TYPE, + caller: sender, + }, + }; + + let rlp_bytes = (!tx.is_l1_message()).then_some(envelope.into()); + Self::new(base, rlp_bytes) + } +} diff --git a/crates/scroll/alloy/rpc-types/src/transaction.rs b/crates/scroll/alloy/rpc-types/src/transaction.rs index 63d1d5e06e9..9aa28541527 100644 --- a/crates/scroll/alloy/rpc-types/src/transaction.rs +++ b/crates/scroll/alloy/rpc-types/src/transaction.rs @@ -1,10 +1,10 @@ //! Scroll specific types related to transactions. -use alloy_consensus::{Transaction as _, Typed2718}; +use alloy_consensus::{transaction::Recovered, Transaction as _, Typed2718}; use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; use alloy_primitives::{Address, BlockHash, Bytes, ChainId, TxKind, B256, U256}; use alloy_serde::OtherFields; -use scroll_alloy_consensus::ScrollTxEnvelope; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; use serde::{Deserialize, Serialize}; mod request; @@ -23,6 +23,37 @@ pub struct Transaction { pub inner: alloy_rpc_types_eth::Transaction, } +impl Transaction { + /// Returns a rpc [`Transaction`] with a [`ScrollTransactionInfo`] and + /// [`Recovered`] as input. 
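+    ///
+    /// The reported `effective_gas_price` follows the body below:
+    /// ```ignore
+    /// // L1 messages expose `gasPrice: 0` over RPC;
+    /// // otherwise, with a known base fee:
+    /// //     effective_gas_price = effective_tip_per_gas + base_fee
+    /// // and when no base fee is available (e.g. a pending block):
+    /// //     effective_gas_price = max_fee_per_gas
+    /// ```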
+ pub fn from_transaction( + tx: Recovered, + tx_info: ScrollTransactionInfo, + ) -> Self { + let base_fee = tx_info.inner.base_fee; + let effective_gas_price = if tx.is_l1_message() { + // For l1 messages, we set the `gasPrice` field to 0 in rpc + 0 + } else { + base_fee + .map(|base_fee| { + tx.effective_tip_per_gas(base_fee).unwrap_or_default() + base_fee as u128 + }) + .unwrap_or_else(|| tx.max_fee_per_gas()) + }; + + Self { + inner: alloy_rpc_types_eth::Transaction { + inner: tx, + block_hash: tx_info.inner.block_hash, + block_number: tx_info.inner.block_number, + transaction_index: tx_info.inner.index, + effective_gas_price: Some(effective_gas_price), + }, + } + } +} + impl Typed2718 for Transaction { fn ty(&self) -> u8 { self.inner.ty() diff --git a/crates/scroll/evm/src/execute.rs b/crates/scroll/evm/src/execute.rs index 8786cec4dea..4c6706ef2a7 100644 --- a/crates/scroll/evm/src/execute.rs +++ b/crates/scroll/evm/src/execute.rs @@ -44,7 +44,10 @@ mod tests { use crate::{ScrollEvmConfig, ScrollRethReceiptBuilder}; use std::{convert::Infallible, sync::Arc}; - use alloy_consensus::{transaction::SignerRecoverable, Block, BlockBody, Header}; + use alloy_consensus::{ + transaction::{Recovered, SignerRecoverable}, + Block, BlockBody, Header, SignableTransaction, Signed, TxLegacy, + }; use alloy_eips::{ eip7702::{constants::PER_EMPTY_ACCOUNT_COST, Authorization, SignedAuthorization}, Typed2718, @@ -54,9 +57,10 @@ mod tests { precompiles::PrecompilesMap, Evm, }; + use alloy_primitives::Sealed; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::ConfigureEvm; - use reth_primitives_traits::{NodePrimitives, RecoveredBlock}; + use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SignedTransaction}; use reth_scroll_chainspec::{ScrollChainConfig, ScrollChainSpec, ScrollChainSpecBuilder}; use reth_scroll_primitives::{ ScrollBlock, ScrollPrimitives, ScrollReceipt, ScrollTransactionSigned, @@ -71,7 +75,7 @@ mod tests { primitives::{Address, TxKind, B256, U256}, state::AccountInfo, }; - use scroll_alloy_consensus::{ScrollTransactionReceipt, ScrollTxType, ScrollTypedTransaction}; + use scroll_alloy_consensus::{ScrollTransactionReceipt, ScrollTxEnvelope, ScrollTxType}; use scroll_alloy_evm::{ curie::{ BLOB_SCALAR_SLOT, COMMIT_SCALAR_SLOT, CURIE_L1_GAS_PRICE_ORACLE_BYTECODE, @@ -136,26 +140,39 @@ mod tests { ) } - fn transaction(typ: ScrollTxType, gas_limit: u64) -> ScrollTransactionSigned { - let transaction = match typ { - ScrollTxType::Legacy => ScrollTypedTransaction::Legacy(alloy_consensus::TxLegacy { - to: TxKind::Call(Address::ZERO), - chain_id: Some(SCROLL_CHAIN_ID), - gas_limit, - ..Default::default() - }), - ScrollTxType::Eip2930 => ScrollTypedTransaction::Eip2930(alloy_consensus::TxEip2930 { - to: TxKind::Call(Address::ZERO), - chain_id: SCROLL_CHAIN_ID, - gas_limit, - ..Default::default() - }), - ScrollTxType::Eip1559 => ScrollTypedTransaction::Eip1559(alloy_consensus::TxEip1559 { - to: TxKind::Call(Address::ZERO), - chain_id: SCROLL_CHAIN_ID, - gas_limit, - ..Default::default() - }), + fn transaction(typ: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { + let pk = B256::random(); + match typ { + ScrollTxType::Legacy => { + let tx = TxLegacy { + to: TxKind::Call(Address::ZERO), + chain_id: Some(SCROLL_CHAIN_ID), + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Legacy(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::Eip2930 => { + let tx = alloy_consensus::TxEip2930 { + to: 
TxKind::Call(Address::ZERO), + chain_id: SCROLL_CHAIN_ID, + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip2930(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::Eip1559 => { + let tx = alloy_consensus::TxEip1559 { + to: TxKind::Call(Address::ZERO), + chain_id: SCROLL_CHAIN_ID, + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip1559(Signed::new_unhashed(tx, signature)) + } ScrollTxType::Eip7702 => { let authorization = Authorization { chain_id: Default::default(), @@ -165,7 +182,8 @@ mod tests { let signature = reth_primitives::sign_message(B256::random(), authorization.signature_hash()) .unwrap(); - ScrollTypedTransaction::Eip7702(alloy_consensus::TxEip7702 { + + let tx = alloy_consensus::TxEip7702 { to: Address::ZERO, chain_id: SCROLL_CHAIN_ID, gas_limit: gas_limit + PER_EMPTY_ACCOUNT_COST, @@ -176,21 +194,19 @@ mod tests { signature.s(), )], ..Default::default() - }) + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip7702(Signed::new_unhashed(tx, signature)) } ScrollTxType::L1Message => { - ScrollTypedTransaction::L1Message(scroll_alloy_consensus::TxL1Message { + ScrollTxEnvelope::L1Message(Sealed::new(scroll_alloy_consensus::TxL1Message { sender: Address::random(), to: Address::ZERO, gas_limit, ..Default::default() - }) + })) } - }; - - let pk = B256::random(); - let signature = reth_primitives::sign_message(pk, transaction.signature_hash()).unwrap(); - ScrollTransactionSigned::new_unhashed(transaction, signature) + } } fn execute_transaction( @@ -243,8 +259,9 @@ mod tests { } // execute and verify output - let res = - strategy.execute_transaction(transaction.try_into_recovered()?.as_recovered_ref()); + let sender = transaction.try_recover()?; + let tx = Recovered::new_unchecked(transaction, sender); + let res = strategy.execute_transaction(&tx); // check for error or execution outcome let output = strategy.apply_post_execution_changes()?; @@ -345,9 +362,9 @@ mod tests { let mut strategy = executor(&block, &mut state); // execute and verify error - let res = strategy.execute_transaction( - transaction.try_into_recovered().expect("failed to recover tx").as_recovered_ref(), - ); + let sender = transaction.try_recover()?; + let tx = Recovered::new_unchecked(transaction, sender); + let res = strategy.execute_transaction(&tx); assert_eq!( res.unwrap_err().to_string(), "transaction gas limit 10000001 is more than blocks available gas 10000000" diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index 9fc2ddab871..880ee6a50ce 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -72,12 +72,9 @@ where { type Handle = RpcHandle>; - async fn launch_add_ons( - self, - ctx: reth_node_api::AddOnsContext<'_, N>, - ) -> eyre::Result { + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { let Self { rpc_add_ons } = self; - rpc_add_ons.launch_add_ons_with(ctx, |_, _, _| Ok(())).await + rpc_add_ons.launch_add_ons_with(ctx, |_| Ok(())).await } } diff --git a/crates/scroll/node/src/builder/network.rs b/crates/scroll/node/src/builder/network.rs index d72fba57734..c69adf6ca27 100644 --- a/crates/scroll/node/src/builder/network.rs +++ b/crates/scroll/node/src/builder/network.rs @@ -1,15 +1,14 @@ +use reth_eth_wire_types::BasicNetworkPrimitives; use reth_network::{ 
config::NetworkMode, transform::header::HeaderTransform, NetworkConfig, NetworkHandle, - NetworkManager, NetworkPrimitives, PeersInfo, + NetworkManager, PeersInfo, }; use reth_node_api::TxTy; use reth_node_builder::{components::NetworkBuilder, BuilderContext, FullNodeTypes}; use reth_node_types::NodeTypes; use reth_primitives_traits::BlockHeader; use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_primitives::{ - ScrollBlock, ScrollBlockBody, ScrollPrimitives, ScrollReceipt, ScrollTransactionSigned, -}; +use reth_scroll_primitives::ScrollPrimitives; use reth_tracing::tracing::info; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use scroll_alloy_hardforks::ScrollHardforks; @@ -58,18 +57,8 @@ where } /// Network primitive types used by Scroll networks. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub struct ScrollNetworkPrimitives; - -impl NetworkPrimitives for ScrollNetworkPrimitives { - type BlockHeader = alloy_consensus::Header; - type BlockBody = ScrollBlockBody; - type Block = ScrollBlock; - type BroadcastedTransaction = ScrollTransactionSigned; - type PooledTransaction = scroll_alloy_consensus::ScrollPooledTransaction; - type Receipt = ScrollReceipt; -} +pub type ScrollNetworkPrimitives = + BasicNetworkPrimitives; /// An implementation of a [`HeaderTransform`] for Scroll. #[derive(Debug, Clone)] diff --git a/crates/scroll/node/src/test_utils.rs b/crates/scroll/node/src/test_utils.rs index 6abaaefd982..f69ba9f1a63 100644 --- a/crates/scroll/node/src/test_utils.rs +++ b/crates/scroll/node/src/test_utils.rs @@ -36,6 +36,7 @@ pub async fn setup( .build(Default::default()), ), is_dev, + Default::default(), scroll_payload_attributes, ) .await diff --git a/crates/scroll/payload/src/builder.rs b/crates/scroll/payload/src/builder.rs index 2950c79a526..26c83cc7781 100644 --- a/crates/scroll/payload/src/builder.rs +++ b/crates/scroll/payload/src/builder.rs @@ -11,7 +11,7 @@ use reth_basic_payload_builder::{ is_better_payload, BuildArguments, BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour, PayloadBuilder, PayloadConfig, }; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; +use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ block::{BlockExecutionError, BlockValidationError}, @@ -294,7 +294,7 @@ impl ScrollBuilder<'_, Txs> { execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), }, - trie: Arc::new(trie_updates), + trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), }; let no_tx_pool = ctx.attributes().no_tx_pool; diff --git a/crates/scroll/primitives/Cargo.toml b/crates/scroll/primitives/Cargo.toml index afb54b933d4..d9ee3c03a12 100644 --- a/crates/scroll/primitives/Cargo.toml +++ b/crates/scroll/primitives/Cargo.toml @@ -19,17 +19,11 @@ reth-zstd-compressors = { workspace = true, optional = true } # alloy alloy-consensus.workspace = true alloy-eips.workspace = true -alloy-evm.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -# revm -revm-context.workspace = true - # scroll scroll-alloy-consensus.workspace = true -scroll-alloy-evm.workspace = true -revm-scroll.workspace = true # codec bytes = { workspace = true, optional = true } @@ -37,23 +31,15 @@ modular-bitfield = { workspace = true, optional = true } serde = { workspace = true, optional = true } # misc -derive_more.workspace = true once_cell.workspace = true -rand_08 = { workspace 
= true, optional = true } # test arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } -secp256k1 = { workspace = true, optional = true } [dev-dependencies] -proptest-arbitrary-interop.workspace = true reth-codecs = { workspace = true, features = ["test-utils"] } rstest.workspace = true -proptest.workspace = true -secp256k1 = { workspace = true, features = ["rand"] } rand.workspace = true -rand_08.workspace = true [features] default = ["std"] @@ -68,21 +54,12 @@ std = [ "reth-primitives-traits/std", "reth-zstd-compressors?/std", "reth-codecs?/std", - "derive_more/std", "once_cell/std", - "proptest?/std", "serde?/std", - "rand_08?/std", - "secp256k1?/std", - "revm-scroll/std", - "revm-context/std", - "alloy-evm/std", - "scroll-alloy-evm/std", ] reth-codec = [ "dep:reth-codecs", "std", - "dep:proptest", "dep:arbitrary", "reth-primitives-traits/reth-codec", "scroll-alloy-consensus/reth-codec", @@ -93,17 +70,12 @@ reth-codec = [ serde = [ "dep:serde", "scroll-alloy-consensus/serde", - "secp256k1?/serde", "alloy-consensus/serde", "alloy-eips/serde", "alloy-primitives/serde", "bytes?/serde", - "rand_08?/serde", "reth-codecs?/serde", "reth-primitives-traits/serde", - "revm-scroll/serde", - "revm-context/serde", - "scroll-alloy-evm/serde", "rand/serde", ] serde-bincode-compat = [ @@ -114,9 +86,6 @@ serde-bincode-compat = [ ] arbitrary = [ "dep:arbitrary", - "dep:secp256k1", - "secp256k1?/rand", - "rand_08", "alloy-consensus/arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", diff --git a/crates/scroll/primitives/src/lib.rs b/crates/scroll/primitives/src/lib.rs index 2f4f6cafb96..e8be9a0a61e 100644 --- a/crates/scroll/primitives/src/lib.rs +++ b/crates/scroll/primitives/src/lib.rs @@ -11,10 +11,11 @@ use once_cell as _; -extern crate alloc; +#[cfg(not(feature = "std"))] +extern crate alloc as std; pub mod transaction; -pub use transaction::{signed::ScrollTransactionSigned, tx_type::ScrollTxType}; +pub use transaction::{tx_type::ScrollTxType, ScrollTransactionSigned}; use reth_primitives_traits::Block; diff --git a/crates/scroll/primitives/src/receipt.rs b/crates/scroll/primitives/src/receipt.rs index 9cfda2a28c1..405e33eba23 100644 --- a/crates/scroll/primitives/src/receipt.rs +++ b/crates/scroll/primitives/src/receipt.rs @@ -371,9 +371,9 @@ impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for Scroll #[cfg(feature = "reth-codec")] mod compact { use super::*; - use alloc::borrow::Cow; use alloy_primitives::U256; use reth_codecs::Compact; + use std::borrow::Cow; #[derive(reth_codecs::CompactZstd)] #[reth_zstd( diff --git a/crates/scroll/primitives/src/transaction/mod.rs b/crates/scroll/primitives/src/transaction/mod.rs index 0b23824d496..87471e507e9 100644 --- a/crates/scroll/primitives/src/transaction/mod.rs +++ b/crates/scroll/primitives/src/transaction/mod.rs @@ -1,4 +1,6 @@ //! Scroll primitives transaction types. -pub mod signed; pub mod tx_type; + +/// Signed transaction. +pub type ScrollTransactionSigned = scroll_alloy_consensus::ScrollTxEnvelope; diff --git a/crates/scroll/primitives/src/transaction/signed.rs b/crates/scroll/primitives/src/transaction/signed.rs deleted file mode 100644 index 569a064ec0f..00000000000 --- a/crates/scroll/primitives/src/transaction/signed.rs +++ /dev/null @@ -1,833 +0,0 @@ -//! A signed Scroll transaction. 
- -use crate::ScrollTxType; -use alloc::{vec, vec::Vec}; -use core::{ - hash::{Hash, Hasher}, - mem, - ops::Deref, -}; -#[cfg(feature = "std")] -use std::sync::OnceLock; - -use alloy_consensus::{ - transaction::{Either, RlpEcdsaDecodableTx, RlpEcdsaEncodableTx, SignerRecoverable}, - SignableTransaction, Signed, Transaction, TxEip1559, TxEip2930, TxEip7702, TxLegacy, Typed2718, -}; -use alloy_eips::{ - eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, - eip2930::AccessList, - eip7702::{RecoveredAuthority, RecoveredAuthorization, SignedAuthorization}, -}; -use alloy_evm::{FromRecoveredTx, FromTxWithEncoded}; -use alloy_primitives::{keccak256, Address, Bytes, Signature, TxHash, TxKind, Uint, B256}; -use alloy_rlp::Header; -#[cfg(feature = "reth-codec")] -use arbitrary as _; -use derive_more::{AsRef, Deref}; -#[cfg(not(feature = "std"))] -use once_cell::sync::OnceCell as OnceLock; -#[cfg(any(test, feature = "reth-codec"))] -use proptest as _; -use reth_primitives_traits::{ - crypto::secp256k1::{recover_signer, recover_signer_unchecked}, - transaction::{error::TryFromRecoveredTransactionError, signed::RecoveryError}, - InMemorySize, SignedTransaction, -}; -use revm_context::TxEnv; -use scroll_alloy_consensus::{ - ScrollPooledTransaction, ScrollTypedTransaction, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE, -}; -use scroll_alloy_evm::ScrollTransactionIntoTxEnv; - -/// Signed transaction. -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone, Eq, AsRef, Deref)] -pub struct ScrollTransactionSigned { - /// Transaction hash - #[cfg_attr(feature = "serde", serde(skip))] - pub hash: OnceLock, - /// The transaction signature values - pub signature: Signature, - /// Raw transaction info - #[deref] - #[as_ref] - pub transaction: ScrollTypedTransaction, -} - -impl ScrollTransactionSigned { - /// Calculates hash of given transaction and signature and returns new instance. - pub fn new(transaction: ScrollTypedTransaction, signature: Signature) -> Self { - let signed_tx = Self::new_unhashed(transaction, signature); - if signed_tx.ty() != ScrollTxType::L1Message { - signed_tx.hash.get_or_init(|| signed_tx.recalculate_hash()); - } - - signed_tx - } - - /// Creates a new signed transaction from the given transaction and signature without the hash. - /// - /// Note: this only calculates the hash on the first [`ScrollTransactionSigned::hash`] call. - pub fn new_unhashed(transaction: ScrollTypedTransaction, signature: Signature) -> Self { - Self { hash: Default::default(), signature, transaction } - } - - /// Returns whether this transaction is a l1 message. - pub const fn is_l1_message(&self) -> bool { - matches!(self.transaction, ScrollTypedTransaction::L1Message(_)) - } -} - -impl SignedTransaction for ScrollTransactionSigned { - fn tx_hash(&self) -> &TxHash { - self.hash.get_or_init(|| self.recalculate_hash()) - } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match &self.transaction { - // Scroll's L1 message does not have a signature. Directly return the `sender` address. 
- ScrollTypedTransaction::Legacy(tx) => tx.encode_for_signing(buf), - ScrollTypedTransaction::Eip2930(tx) => tx.encode_for_signing(buf), - ScrollTypedTransaction::Eip1559(tx) => tx.encode_for_signing(buf), - ScrollTypedTransaction::Eip7702(tx) => tx.encode_for_signing(buf), - ScrollTypedTransaction::L1Message(tx) => return Ok(tx.sender), - }; - recover_signer_unchecked(&self.signature, keccak256(buf)) - } - - fn recalculate_hash(&self) -> B256 { - keccak256(self.encoded_2718()) - } -} - -impl SignerRecoverable for ScrollTransactionSigned { - fn recover_signer(&self) -> Result { - // Scroll's L1 message does not have a signature. Directly return the `sender` address. - if let ScrollTypedTransaction::L1Message(TxL1Message { sender, .. }) = self.transaction { - return Ok(sender); - } - - let Self { transaction, signature, .. } = self; - let signature_hash = transaction.signature_hash(); - recover_signer(signature, signature_hash) - } - - fn recover_signer_unchecked(&self) -> Result { - // Scroll's L1 message does not have a signature. Directly return the `sender` address. - if let ScrollTypedTransaction::L1Message(TxL1Message { sender, .. }) = &self.transaction { - return Ok(*sender); - } - - let Self { transaction, signature, .. } = self; - let signature_hash = transaction.signature_hash(); - recover_signer_unchecked(signature, signature_hash) - } -} - -impl InMemorySize for ScrollTransactionSigned { - #[inline] - fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() - } -} - -impl alloy_rlp::Encodable for ScrollTransactionSigned { - fn encode(&self, out: &mut dyn alloy_rlp::bytes::BufMut) { - self.network_encode(out); - } - - fn length(&self) -> usize { - let mut payload_length = self.encode_2718_len(); - if !self.is_legacy() { - payload_length += Header { list: false, payload_length }.length(); - } - - payload_length - } -} - -impl alloy_rlp::Decodable for ScrollTransactionSigned { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Self::network_decode(buf).map_err(Into::into) - } -} - -impl Encodable2718 for ScrollTransactionSigned { - fn type_flag(&self) -> Option { - if Typed2718::is_legacy(self) { - None - } else { - Some(self.ty()) - } - } - - fn encode_2718_len(&self) -> usize { - match &self.transaction { - ScrollTypedTransaction::Legacy(legacy_tx) => { - legacy_tx.eip2718_encoded_length(&self.signature) - } - ScrollTypedTransaction::Eip2930(access_list_tx) => { - access_list_tx.eip2718_encoded_length(&self.signature) - } - ScrollTypedTransaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.eip2718_encoded_length(&self.signature) - } - ScrollTypedTransaction::Eip7702(authorization_list_tx) => { - authorization_list_tx.eip2718_encoded_length(&self.signature) - } - ScrollTypedTransaction::L1Message(l1_message) => l1_message.eip2718_encoded_length(), - } - } - - fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - let Self { transaction, signature, .. 
} = self; - - match &transaction { - ScrollTypedTransaction::Legacy(legacy_tx) => { - // do nothing w/ with_header - legacy_tx.eip2718_encode(signature, out) - } - ScrollTypedTransaction::Eip2930(access_list_tx) => { - access_list_tx.eip2718_encode(signature, out) - } - ScrollTypedTransaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.eip2718_encode(signature, out) - } - ScrollTypedTransaction::Eip7702(authorization_list_tx) => { - authorization_list_tx.eip2718_encode(signature, out) - } - ScrollTypedTransaction::L1Message(l1_message) => l1_message.encode_2718(out), - } - } -} - -impl Decodable2718 for ScrollTransactionSigned { - fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { - ScrollTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), - ScrollTxType::Eip2930 => { - let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); - let signed_tx = Self::new_unhashed(ScrollTypedTransaction::Eip2930(tx), signature); - signed_tx.hash.get_or_init(|| hash); - Ok(signed_tx) - } - ScrollTxType::Eip1559 => { - let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); - let signed_tx = Self::new_unhashed(ScrollTypedTransaction::Eip1559(tx), signature); - signed_tx.hash.get_or_init(|| hash); - Ok(signed_tx) - } - ScrollTxType::Eip7702 => { - let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); - let signed_tx = Self::new_unhashed(ScrollTypedTransaction::Eip7702(tx), signature); - signed_tx.hash.get_or_init(|| hash); - Ok(signed_tx) - } - ScrollTxType::L1Message => Ok(Self::new_unhashed( - ScrollTypedTransaction::L1Message(TxL1Message::rlp_decode(buf)?), - TxL1Message::signature(), - )), - } - } - - fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - let (transaction, signature) = TxLegacy::rlp_decode_with_signature(buf)?; - let signed_tx = Self::new_unhashed(ScrollTypedTransaction::Legacy(transaction), signature); - - Ok(signed_tx) - } -} - -impl Transaction for ScrollTransactionSigned { - fn chain_id(&self) -> Option { - self.deref().chain_id() - } - - fn nonce(&self) -> u64 { - self.deref().nonce() - } - - fn gas_limit(&self) -> u64 { - self.deref().gas_limit() - } - - fn gas_price(&self) -> Option { - self.deref().gas_price() - } - - fn max_fee_per_gas(&self) -> u128 { - self.deref().max_fee_per_gas() - } - - fn max_priority_fee_per_gas(&self) -> Option { - self.deref().max_priority_fee_per_gas() - } - - fn max_fee_per_blob_gas(&self) -> Option { - self.deref().max_fee_per_blob_gas() - } - - fn priority_fee_or_price(&self) -> u128 { - self.deref().priority_fee_or_price() - } - - fn effective_gas_price(&self, base_fee: Option) -> u128 { - self.deref().effective_gas_price(base_fee) - } - - fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.deref().effective_tip_per_gas(base_fee) - } - - fn is_dynamic_fee(&self) -> bool { - self.deref().is_dynamic_fee() - } - - fn kind(&self) -> TxKind { - self.deref().kind() - } - - fn is_create(&self) -> bool { - self.deref().is_create() - } - - fn value(&self) -> Uint<256, 4> { - self.deref().value() - } - - fn input(&self) -> &Bytes { - self.deref().input() - } - - fn access_list(&self) -> Option<&AccessList> { - self.deref().access_list() - } - - fn blob_versioned_hashes(&self) -> Option<&[B256]> { - self.deref().blob_versioned_hashes() - } - - fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - self.deref().authorization_list() - } -} - -/// A trait that allows to verify if a 
transaction is a l1 message. -pub trait IsL1Message { - /// Whether the transaction is a l1 transaction. - fn is_l1_message(&self) -> bool; -} - -impl IsL1Message for ScrollTransactionSigned { - fn is_l1_message(&self) -> bool { - matches!(self.transaction, ScrollTypedTransaction::L1Message(_)) - } -} - -impl Typed2718 for ScrollTransactionSigned { - fn ty(&self) -> u8 { - self.deref().ty() - } -} - -impl PartialEq for ScrollTransactionSigned { - fn eq(&self, other: &Self) -> bool { - self.signature == other.signature && - self.transaction == other.transaction && - self.tx_hash() == other.tx_hash() - } -} - -impl Hash for ScrollTransactionSigned { - fn hash(&self, state: &mut H) { - self.signature.hash(state); - self.transaction.hash(state); - } -} - -impl FromTxWithEncoded for ScrollTransactionIntoTxEnv { - fn from_encoded_tx(tx: &ScrollTransactionSigned, caller: Address, encoded: Bytes) -> Self { - let base = match &tx.transaction { - ScrollTypedTransaction::Legacy(tx) => TxEnv::from_recovered_tx(tx, caller), - ScrollTypedTransaction::Eip2930(tx) => TxEnv::from_recovered_tx(tx, caller), - ScrollTypedTransaction::Eip1559(tx) => TxEnv::from_recovered_tx(tx, caller), - ScrollTypedTransaction::Eip7702(tx) => TxEnv::from_recovered_tx(tx, caller), - ScrollTypedTransaction::L1Message(tx) => { - let TxL1Message { to, value, gas_limit, input, queue_index: _, sender: _ } = tx; - TxEnv { - tx_type: tx.ty(), - caller, - gas_limit: *gas_limit, - kind: TxKind::Call(*to), - value: *value, - data: input.clone(), - ..Default::default() - } - } - }; - - let encoded = (!tx.is_l1_message()).then_some(encoded); - Self::new(base, encoded) - } -} - -#[cfg(feature = "reth-codec")] -impl reth_codecs::Compact for ScrollTransactionSigned { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let start = buf.as_mut().len(); - - // Placeholder for bitflags. 
- // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] - buf.put_u8(0); - - let sig_bit = self.signature.to_compact(buf) as u8; - let zstd_bit = self.transaction.input().len() >= 32; - - let tx_bits = if zstd_bit { - let mut tmp = Vec::with_capacity(256); - if cfg!(feature = "std") { - reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) - } else { - let mut compressor = reth_zstd_compressors::create_tx_compressor(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - } - } else { - self.transaction.to_compact(buf) as u8 - }; - - // Replace bitflags with the actual values - buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); - - buf.as_mut().len() - start - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - use bytes::Buf; - - // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] - let bitflags = buf.get_u8() as usize; - - let sig_bit = bitflags & 1; - let (signature, buf) = Signature::from_compact(buf, sig_bit); - - let zstd_bit = bitflags >> 3; - let (transaction, buf) = if zstd_bit != 0 { - if cfg!(feature = "std") { - reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); - - // TODO: enforce that zstd is only present at a "top" level type - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = ScrollTypedTransaction::from_compact( - decompressor.decompress(buf), - transaction_type, - ); - - (transaction, buf) - }) - } else { - let mut decompressor = reth_zstd_compressors::create_tx_decompressor(); - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = ScrollTypedTransaction::from_compact( - decompressor.decompress(buf), - transaction_type, - ); - - (transaction, buf) - } - } else { - let transaction_type = bitflags >> 1; - ScrollTypedTransaction::from_compact(buf, transaction_type) - }; - - (Self { signature, transaction, hash: Default::default() }, buf) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for ScrollTransactionSigned { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - #[allow(unused_mut)] - let mut transaction = ScrollTypedTransaction::arbitrary(u)?; - - let secp = secp256k1::Secp256k1::new(); - let key_pair = secp256k1::Keypair::new(&secp, &mut rand_08::thread_rng()); - let signature = reth_primitives_traits::crypto::secp256k1::sign_message( - B256::from_slice(&key_pair.secret_bytes()[..]), - transaction.signature_hash(), - ) - .unwrap(); - - let signature = - if is_l1_message(&transaction) { TxL1Message::signature() } else { signature }; - - Ok(Self::new(transaction, signature)) - } -} - -/// Returns `true` if transaction is l1 message. 
-pub const fn is_l1_message(tx: &ScrollTypedTransaction) -> bool { - matches!(tx, ScrollTypedTransaction::L1Message(_)) -} - -impl + RlpEcdsaEncodableTx> From> - for ScrollTransactionSigned -{ - fn from(value: Signed) -> Self { - let (tx, sig, hash) = value.into_parts(); - let this = Self::new(tx.into(), sig); - this.hash.get_or_init(|| hash); - this - } -} - -impl TryFrom for ScrollPooledTransaction { - type Error = TryFromRecoveredTransactionError; - - fn try_from(value: ScrollTransactionSigned) -> Result { - let hash = *value.tx_hash(); - let ScrollTransactionSigned { hash: _, signature, transaction } = value; - - match transaction { - ScrollTypedTransaction::Legacy(tx) => { - Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) - } - ScrollTypedTransaction::Eip2930(tx) => { - Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) - } - ScrollTypedTransaction::Eip1559(tx) => { - Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) - } - ScrollTypedTransaction::Eip7702(tx) => { - Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) - } - ScrollTypedTransaction::L1Message(_) => { - Err(TryFromRecoveredTransactionError::UnsupportedTransactionType(0xfe)) - } - } - } -} - -impl From for ScrollTransactionSigned { - fn from(value: ScrollPooledTransaction) -> Self { - match value { - ScrollPooledTransaction::Legacy(tx) => tx.into(), - ScrollPooledTransaction::Eip2930(tx) => tx.into(), - ScrollPooledTransaction::Eip1559(tx) => tx.into(), - ScrollPooledTransaction::Eip7702(tx) => tx.into(), - } - } -} - -impl FromRecoveredTx for revm_scroll::ScrollTransaction { - fn from_recovered_tx(tx: &ScrollTransactionSigned, sender: Address) -> Self { - let envelope = tx.encoded_2718(); - - let base = match &tx.transaction { - ScrollTypedTransaction::Legacy(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.gas_price, - gas_priority_fee: None, - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: tx.chain_id, - nonce: tx.nonce, - access_list: Default::default(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 0, - caller: sender, - }, - ScrollTypedTransaction::Eip2930(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.gas_price, - gas_priority_fee: None, - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 1, - caller: sender, - }, - ScrollTypedTransaction::Eip1559(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.max_fee_per_gas, - gas_priority_fee: Some(tx.max_priority_fee_per_gas), - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 2, - caller: sender, - }, - ScrollTypedTransaction::Eip7702(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.max_fee_per_gas, - gas_priority_fee: Some(tx.max_priority_fee_per_gas), - kind: tx.to.into(), - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: tx - .authorization_list - 
.iter() - .map(|auth| { - Either::Right(RecoveredAuthorization::new_unchecked( - auth.inner().clone(), - auth.signature() - .ok() - .and_then(|signature| { - recover_signer(&signature, auth.signature_hash()).ok() - }) - .map_or(RecoveredAuthority::Invalid, RecoveredAuthority::Valid), - )) - }) - .collect(), - tx_type: 4, - caller: sender, - }, - ScrollTypedTransaction::L1Message(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: 0, - gas_priority_fee: None, - kind: TxKind::Call(tx.to), - value: tx.value, - data: tx.input.clone(), - chain_id: None, - nonce: 0, - access_list: Default::default(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: 0, - authorization_list: vec![], - tx_type: L1_MESSAGE_TRANSACTION_TYPE, - caller: sender, - }, - }; - - Self { base, rlp_bytes: (!tx.is_l1_message()).then_some(envelope.into()) } - } -} - -impl FromRecoveredTx for ScrollTransactionIntoTxEnv { - fn from_recovered_tx(tx: &ScrollTransactionSigned, sender: Address) -> Self { - let envelope = tx.encoded_2718(); - - let base = match &tx.transaction { - ScrollTypedTransaction::Legacy(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.gas_price, - gas_priority_fee: None, - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: tx.chain_id, - nonce: tx.nonce, - access_list: Default::default(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 0, - caller: sender, - }, - ScrollTypedTransaction::Eip2930(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.gas_price, - gas_priority_fee: None, - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 1, - caller: sender, - }, - ScrollTypedTransaction::Eip1559(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.max_fee_per_gas, - gas_priority_fee: Some(tx.max_priority_fee_per_gas), - kind: tx.to, - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: Default::default(), - tx_type: 2, - caller: sender, - }, - ScrollTypedTransaction::Eip7702(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: tx.max_fee_per_gas, - gas_priority_fee: Some(tx.max_priority_fee_per_gas), - kind: tx.to.into(), - value: tx.value, - data: tx.input.clone(), - chain_id: Some(tx.chain_id), - nonce: tx.nonce, - access_list: tx.access_list.clone(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: tx - .authorization_list - .iter() - .map(|auth| { - Either::Right(RecoveredAuthorization::new_unchecked( - auth.inner().clone(), - auth.signature() - .ok() - .and_then(|signature| { - recover_signer(&signature, auth.signature_hash()).ok() - }) - .map_or(RecoveredAuthority::Invalid, RecoveredAuthority::Valid), - )) - }) - .collect(), - tx_type: 4, - caller: sender, - }, - ScrollTypedTransaction::L1Message(tx) => TxEnv { - gas_limit: tx.gas_limit, - gas_price: 0, - gas_priority_fee: None, - kind: TxKind::Call(tx.to), - value: tx.value, - data: tx.input.clone(), - chain_id: None, - nonce: 0, - access_list: Default::default(), - blob_hashes: Default::default(), - max_fee_per_blob_gas: Default::default(), - authorization_list: 
Default::default(), - tx_type: L1_MESSAGE_TRANSACTION_TYPE, - caller: sender, - }, - }; - - let rlp_bytes = (!tx.is_l1_message()).then_some(envelope.into()); - Self::new(base, rlp_bytes) - } -} - -/// Bincode-compatible transaction type serde implementations. -#[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - use alloc::borrow::Cow; - use alloy_consensus::transaction::serde_bincode_compat::{ - TxEip1559, TxEip2930, TxEip7702, TxLegacy, - }; - use alloy_primitives::{Signature, TxHash}; - use reth_primitives_traits::{serde_bincode_compat::SerdeBincodeCompat, SignedTransaction}; - use serde::{Deserialize, Serialize}; - - /// Bincode-compatible [`super::ScrollTypedTransaction`] serde implementation. - #[derive(Debug, Serialize, Deserialize)] - #[allow(missing_docs)] - enum ScrollTypedTransaction<'a> { - Legacy(TxLegacy<'a>), - Eip2930(TxEip2930<'a>), - Eip1559(TxEip1559<'a>), - Eip7702(TxEip7702<'a>), - L1Message(Cow<'a, scroll_alloy_consensus::TxL1Message>), - } - - impl<'a> From<&'a super::ScrollTypedTransaction> for ScrollTypedTransaction<'a> { - fn from(value: &'a super::ScrollTypedTransaction) -> Self { - match value { - super::ScrollTypedTransaction::Legacy(tx) => Self::Legacy(TxLegacy::from(tx)), - super::ScrollTypedTransaction::Eip2930(tx) => Self::Eip2930(TxEip2930::from(tx)), - super::ScrollTypedTransaction::Eip1559(tx) => Self::Eip1559(TxEip1559::from(tx)), - super::ScrollTypedTransaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), - super::ScrollTypedTransaction::L1Message(tx) => Self::L1Message(Cow::Borrowed(tx)), - } - } - } - - impl<'a> From> for super::ScrollTypedTransaction { - fn from(value: ScrollTypedTransaction<'a>) -> Self { - match value { - ScrollTypedTransaction::Legacy(tx) => Self::Legacy(tx.into()), - ScrollTypedTransaction::Eip2930(tx) => Self::Eip2930(tx.into()), - ScrollTypedTransaction::Eip1559(tx) => Self::Eip1559(tx.into()), - ScrollTypedTransaction::Eip7702(tx) => Self::Eip7702(tx.into()), - ScrollTypedTransaction::L1Message(tx) => Self::L1Message(tx.into_owned()), - } - } - } - - /// Bincode-compatible [`super::ScrollTransactionSigned`] serde implementation. - #[derive(Debug, Serialize, Deserialize)] - pub struct ScrollTransactionSigned<'a> { - hash: TxHash, - signature: Signature, - transaction: ScrollTypedTransaction<'a>, - } - - impl<'a> From<&'a super::ScrollTransactionSigned> for ScrollTransactionSigned<'a> { - fn from(value: &'a super::ScrollTransactionSigned) -> Self { - Self { - hash: *value.tx_hash(), - signature: value.signature, - transaction: ScrollTypedTransaction::from(&value.transaction), - } - } - } - - impl<'a> From> for super::ScrollTransactionSigned { - fn from(value: ScrollTransactionSigned<'a>) -> Self { - Self { - hash: value.hash.into(), - signature: value.signature, - transaction: value.transaction.into(), - } - } - } - - impl SerdeBincodeCompat for super::ScrollTransactionSigned { - type BincodeRepr<'a> = ScrollTransactionSigned<'a>; - - fn as_repr(&self) -> Self::BincodeRepr<'_> { - self.into() - } - - fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { - repr.into() - } - } -} diff --git a/crates/scroll/rpc/src/error.rs b/crates/scroll/rpc/src/error.rs index 5f55b5d4fa9..63570531669 100644 --- a/crates/scroll/rpc/src/error.rs +++ b/crates/scroll/rpc/src/error.rs @@ -1,7 +1,8 @@ //! RPC errors specific to Scroll. 
use alloy_rpc_types_eth::BlockError; -use reth_rpc_eth_api::AsEthApiError; +use reth_evm::execute::ProviderError; +use reth_rpc_eth_api::{AsEthApiError, TransactionConversionError}; use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; use revm::context::result::{EVMError, HaltReason}; @@ -49,3 +50,15 @@ impl FromEvmHalt for ScrollEthApiError { EthApiError::from_evm_halt(halt, gas_limit).into() } } + +impl From for ScrollEthApiError { + fn from(value: TransactionConversionError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for ScrollEthApiError { + fn from(value: ProviderError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index c27eb729c5f..c89c1509fbb 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -1,7 +1,5 @@ //! Scroll-Reth `eth_` endpoint implementation. -use std::{fmt, sync::Arc}; - use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; @@ -18,7 +16,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, + EthApiTypes, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -26,15 +24,15 @@ use reth_tasks::{ TaskSpawner, }; use reth_transaction_pool::TransactionPool; +use std::{fmt, marker::PhantomData, sync::Arc}; +use crate::{eth::transaction::ScrollTxInfoMapper, ScrollEthApiError}; pub use receipt::ScrollReceiptBuilder; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::error::FromEvmError; use reth_scroll_primitives::ScrollPrimitives; -use scroll_alloy_network::Scroll; - -use crate::ScrollEthApiError; +use scroll_alloy_network::{Network, Scroll}; mod block; mod call; @@ -65,9 +63,25 @@ impl ScrollNodeCore for T where T: RpcNodeCore {} /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. #[derive(Clone)] -pub struct ScrollEthApi { +pub struct ScrollEthApi { /// Gateway to node's core components. inner: Arc>, + /// Marker for the network types. + _nt: PhantomData, + tx_resp_builder: + RpcConverter>, +} + +impl ScrollEthApi { + /// Creates a new [`ScrollEthApi`]. 
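+ ///
+ /// The transaction response converter is built once here, wrapping a
+ /// [`ScrollTxInfoMapper`] over the shared inner state, so responses can
+ /// later be enriched with Scroll-specific transaction info while the same
+ /// `inner` keeps serving the core `eth_api`.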
+ pub fn new(eth_api: EthApiNodeBackend) -> Self { + let inner = Arc::new(ScrollEthApiInner { eth_api }); + Self { + inner: inner.clone(), + _nt: PhantomData, + tx_resp_builder: RpcConverter::with_mapper(ScrollTxInfoMapper::new(inner)), + } + } } impl ScrollEthApi @@ -91,23 +105,27 @@ where } } -impl EthApiTypes for ScrollEthApi +impl EthApiTypes for ScrollEthApi where - Self: Send + Sync, + Self: Send + Sync + fmt::Debug, N: ScrollNodeCore, + NetworkT: Network + Clone + fmt::Debug, + ::Primitives: fmt::Debug, { type Error = ScrollEthApiError; type NetworkTypes = Scroll; - type TransactionCompat = Self; + type TransactionCompat = + RpcConverter>; fn tx_resp_builder(&self) -> &Self::TransactionCompat { - self + &self.tx_resp_builder } } -impl RpcNodeCore for ScrollEthApi +impl RpcNodeCore for ScrollEthApi where N: ScrollNodeCore, + NetworkT: Network, { type Primitives = N::Primitives; type Provider = N::Provider; @@ -142,9 +160,10 @@ where } } -impl RpcNodeCoreExt for ScrollEthApi +impl RpcNodeCoreExt for ScrollEthApi where N: ScrollNodeCore, + NetworkT: Network, { #[inline] fn cache(&self) -> &EthStateCache, ProviderReceipt> { @@ -152,7 +171,7 @@ where } } -impl EthApiSpec for ScrollEthApi +impl EthApiSpec for ScrollEthApi where N: ScrollNodeCore< Provider: ChainSpecProvider @@ -160,6 +179,7 @@ where + StageCheckpointReader, Network: NetworkInfo, >, + NetworkT: Network, { type Transaction = ProviderTx; @@ -174,10 +194,12 @@ where } } -impl SpawnBlocking for ScrollEthApi +impl SpawnBlocking for ScrollEthApi where Self: Send + Sync + Clone + 'static, N: ScrollNodeCore, + NetworkT: Network, + ::Primitives: fmt::Debug, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -195,7 +217,7 @@ where } } -impl LoadFee for ScrollEthApi +impl LoadFee for ScrollEthApi where Self: LoadBlock, N: ScrollNodeCore< @@ -215,15 +237,18 @@ where } } -impl LoadState for ScrollEthApi where +impl LoadState for ScrollEthApi +where N: ScrollNodeCore< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, - > + >, + NetworkT: Network, + ::Primitives: fmt::Debug, { } -impl EthState for ScrollEthApi +impl EthState for ScrollEthApi where Self: LoadState + SpawnBlocking, N: ScrollNodeCore, @@ -234,14 +259,14 @@ where } } -impl EthFees for ScrollEthApi +impl EthFees for ScrollEthApi where Self: LoadFee, N: ScrollNodeCore, { } -impl Trace for ScrollEthApi +impl Trace for ScrollEthApi where Self: RpcNodeCore + LoadState< @@ -257,7 +282,7 @@ where { } -impl AddDevSigners for ScrollEthApi +impl AddDevSigners for ScrollEthApi where N: ScrollNodeCore, { @@ -266,7 +291,7 @@ where } } -impl fmt::Debug for ScrollEthApi { +impl fmt::Debug for ScrollEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ScrollEthApi").finish_non_exhaustive() } @@ -274,9 +299,9 @@ impl fmt::Debug for ScrollEthApi { /// Container type `ScrollEthApi` #[allow(missing_debug_implementations)] -struct ScrollEthApiInner { +pub struct ScrollEthApiInner { /// Gateway to node's core components. 
- eth_api: EthApiNodeBackend, + pub eth_api: EthApiNodeBackend, } impl ScrollEthApiInner { @@ -320,6 +345,6 @@ where .proof_permits(ctx.config.proof_permits) .build_inner(); - Ok(ScrollEthApi { inner: Arc::new(ScrollEthApiInner { eth_api }) }) + Ok(ScrollEthApi::new(eth_api)) } } diff --git a/crates/scroll/rpc/src/eth/pending_block.rs b/crates/scroll/rpc/src/eth/pending_block.rs index 644a70e2dff..9bdb6b9b7d4 100644 --- a/crates/scroll/rpc/src/eth/pending_block.rs +++ b/crates/scroll/rpc/src/eth/pending_block.rs @@ -40,14 +40,15 @@ where + StateProviderFactory, Pool: TransactionPool>>, Evm: ConfigureEvm< - Primitives: NodePrimitives< - SignedTx = ProviderTx, - BlockHeader = ProviderHeader, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, + Primitives = ::Primitives, NextBlockEnvCtx = ScrollNextBlockEnvAttributes, >, + Primitives: NodePrimitives< + BlockHeader = ProviderHeader, + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + Block = ProviderBlock, + >, >, { #[inline] diff --git a/crates/scroll/rpc/src/eth/transaction.rs b/crates/scroll/rpc/src/eth/transaction.rs index b0702686f30..d044b1c7d33 100644 --- a/crates/scroll/rpc/src/eth/transaction.rs +++ b/crates/scroll/rpc/src/eth/transaction.rs @@ -1,26 +1,29 @@ //! Loads and formats Scroll transaction RPC response. -use alloy_consensus::{Signed, Transaction as _}; -use alloy_primitives::{Bytes, Sealable, Sealed, Signature, B256}; -use alloy_rpc_types_eth::TransactionInfo; +use crate::{ + eth::{ScrollEthApiInner, ScrollNodeCore}, + ScrollEthApi, +}; +use alloy_consensus::transaction::TransactionInfo; +use alloy_primitives::{Bytes, B256}; +use reth_evm::execute::ProviderError; use reth_node_api::FullNodeComponents; -use reth_primitives::Recovered; -use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, }; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, + try_into_scroll_tx_info, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + TxInfoMapper, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; +use reth_rpc_eth_types::utils::recover_raw_transaction; +use reth_scroll_primitives::ScrollReceipt; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; - -use scroll_alloy_consensus::{ScrollTxEnvelope, ScrollTypedTransaction}; -use scroll_alloy_rpc_types::{ScrollTransactionRequest, Transaction}; - -use crate::{eth::ScrollNodeCore, ScrollEthApi, ScrollEthApiError}; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; +use std::{ + fmt::{Debug, Formatter}, + sync::Arc, +}; impl EthTransactions for ScrollEthApi where @@ -57,100 +60,38 @@ where { } -impl TransactionCompat for ScrollEthApi -where - N: FullNodeComponents>, -{ - type Transaction = Transaction; - type Error = ScrollEthApiError; - - fn fill( - &self, - tx: Recovered, - tx_info: TransactionInfo, - ) -> Result { - let from = tx.signer(); - let hash = *tx.tx_hash(); - let ScrollTransactionSigned { transaction, signature, .. 
} = tx.into_inner(); - - let inner = match transaction { - ScrollTypedTransaction::Legacy(tx) => Signed::new_unchecked(tx, signature, hash).into(), - ScrollTypedTransaction::Eip2930(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - ScrollTypedTransaction::Eip1559(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - ScrollTypedTransaction::Eip7702(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - ScrollTypedTransaction::L1Message(tx) => { - ScrollTxEnvelope::L1Message(tx.seal_unchecked(hash)) - } - }; - - let TransactionInfo { - block_hash, block_number, index: transaction_index, base_fee, .. - } = tx_info; +/// Scroll implementation of [`TxInfoMapper`]. +/// +/// Receipt is fetched to extract the `l1_fee` for all transactions but L1 messages. +#[derive(Clone)] +pub struct ScrollTxInfoMapper(Arc>); - let effective_gas_price = if inner.is_l1_message() { - // For l1 message, we must always set the `gasPrice` field to 0 in rpc - // l1 message tx don't have a gas price field, but serde of `Transaction` will take care - // of it - 0 - } else { - base_fee - .map(|base_fee| { - inner.effective_tip_per_gas(base_fee).unwrap_or_default() + base_fee as u128 - }) - .unwrap_or_else(|| inner.max_fee_per_gas()) - }; - let inner = Recovered::new_unchecked(inner, from); - - Ok(Transaction { - inner: alloy_rpc_types_eth::Transaction { - inner, - block_hash, - block_number, - transaction_index, - effective_gas_price: Some(effective_gas_price), - }, - }) +impl Debug for ScrollTxInfoMapper { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ScrollTxInfoMapper").finish() } +} - fn build_simulate_v1_transaction( - &self, - request: alloy_rpc_types_eth::TransactionRequest, - ) -> Result { - let request: ScrollTransactionRequest = request.into(); - let Ok(tx) = request.build_typed_tx() else { - return Err(ScrollEthApiError::Eth(EthApiError::TransactionConversionError)) - }; - - // Create an empty signature for the transaction. - let signature = Signature::new(Default::default(), Default::default(), false); - Ok(ScrollTransactionSigned::new_unhashed(tx, signature)) +impl ScrollTxInfoMapper { + /// Creates [`ScrollTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
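+ ///
+ /// Only an [`Arc`] of the inner API is held, so the mapper is cheap to
+ /// clone and each [`TxInfoMapper::try_map`] call reads receipts through
+ /// the node's current provider.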
+ pub const fn new(eth_api: Arc>) -> Self { + Self(eth_api) } +} - fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - let mut tx = tx.inner.inner.inner_mut(); - let input = match &mut tx { - ScrollTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, - ScrollTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, - ScrollTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, - ScrollTxEnvelope::L1Message(tx) => { - let (mut deposit, hash) = std::mem::replace( - tx, - Sealed::new_unchecked(Default::default(), Default::default()), - ) - .split(); - deposit.input = deposit.input.slice(..4); - let mut deposit = deposit.seal_unchecked(hash); - std::mem::swap(tx, &mut deposit); - return - } - _ => return, - }; - *input = input.slice(..4); +impl TxInfoMapper<&ScrollTxEnvelope> for ScrollTxInfoMapper +where + N: FullNodeComponents, + N::Provider: ReceiptProvider, +{ + type Out = ScrollTransactionInfo; + type Err = ProviderError; + + fn try_map( + &self, + tx: &ScrollTxEnvelope, + tx_info: TransactionInfo, + ) -> Result { + try_into_scroll_tx_info(self.0.eth_api.provider(), tx, tx_info) } } diff --git a/crates/scroll/txpool/src/transaction.rs b/crates/scroll/txpool/src/transaction.rs index 71a808ecf2a..bee30961949 100644 --- a/crates/scroll/txpool/src/transaction.rs +++ b/crates/scroll/txpool/src/transaction.rs @@ -212,7 +212,7 @@ where #[cfg(test)] mod tests { use crate::{ScrollPooledTransaction, ScrollTransactionValidator}; - use alloy_consensus::transaction::Recovered; + use alloy_consensus::{transaction::Recovered, Signed}; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::Signature; use reth_provider::test_utils::MockEthProvider; @@ -236,7 +236,7 @@ mod tests { let signer = Default::default(); let deposit_tx = ScrollTypedTransaction::L1Message(TxL1Message::default()); let signature = Signature::test_signature(); - let signed_tx = ScrollTransactionSigned::new_unhashed(deposit_tx, signature); + let signed_tx: ScrollTransactionSigned = Signed::new_unhashed(deposit_tx, signature).into(); let signed_recovered = Recovered::new_unchecked(signed_tx, signer); let len = signed_recovered.encode_2718_len(); let pooled_tx: ScrollPooledTransaction = diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 515c2712466..6d230b34731 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -51,7 +51,7 @@ reth-testing-utils.workspace = true test-utils = [ "reth-consensus/test-utils", "reth-network-p2p/test-utils", - "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-stages-types/test-utils", + "reth-primitives-traits/test-utils", ] diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 45bdc2d8942..56b895cac7d 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -90,6 +90,8 @@ impl PipelineBuilder { progress: Default::default(), metrics_tx, fail_on_unwind, + last_detached_head_unwind_target: None, + detached_head_attempts: 0, } } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 37152967a62..a064dd471be 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -7,9 +7,9 @@ pub use event::*; use futures_util::Future; use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, ChainStateBlockReader, - ChainStateBlockWriter, DatabaseProviderFactory, 
ProviderFactory, StageCheckpointReader, - StageCheckpointWriter, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, + ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, + StageCheckpointReader, StageCheckpointWriter, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -83,6 +83,12 @@ pub struct Pipeline { /// Whether an unwind should fail the syncing process. Should only be set when downloading /// blocks from trusted sources and expecting them to be valid. fail_on_unwind: bool, + /// Block that was chosen as a target of the last unwind triggered by + /// [`StageError::DetachedHead`] error. + last_detached_head_unwind_target: Option, + /// Number of consecutive unwind attempts due to [`StageError::DetachedHead`] for the current + /// fork. + detached_head_attempts: u64, } impl Pipeline { @@ -110,6 +116,14 @@ impl Pipeline { pub fn events(&self) -> EventStream { self.event_sender.new_listener() } + + /// Get a mutable reference to a stage by index. + pub fn stage( + &mut self, + idx: usize, + ) -> &mut dyn Stage< as DatabaseProviderFactory>::ProviderRW> { + &mut self.stages[idx] + } } impl Pipeline { @@ -383,8 +397,7 @@ impl Pipeline { ) -> Result { let total_stages = self.stages.len(); - let stage = &mut self.stages[stage_index]; - let stage_id = stage.id(); + let stage_id = self.stage(stage_index).id(); let mut made_progress = false; let target = self.max_block.or(previous_stage); @@ -422,10 +435,9 @@ impl Pipeline { target, }); - if let Err(err) = stage.execute_ready(exec_input).await { + if let Err(err) = self.stage(stage_index).execute_ready(exec_input).await { self.event_sender.notify(PipelineEvent::Error { stage_id }); - - match on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? { + match self.on_stage_error(stage_id, prev_checkpoint, err)? { Some(ctrl) => return Ok(ctrl), None => continue, }; @@ -443,7 +455,7 @@ impl Pipeline { target, }); - match stage.execute(&provider_rw, exec_input) { + match self.stage(stage_index).execute(&provider_rw, exec_input) { Ok(out @ ExecOutput { checkpoint, done }) => { made_progress |= checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number; @@ -468,7 +480,7 @@ impl Pipeline { UnifiedStorageWriter::commit(provider_rw)?; - stage.post_execute_commit()?; + self.stage(stage_index).post_execute_commit()?; if done { let block_number = checkpoint.block_number; @@ -483,101 +495,118 @@ impl Pipeline { drop(provider_rw); self.event_sender.notify(PipelineEvent::Error { stage_id }); - if let Some(ctrl) = - on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? - { + if let Some(ctrl) = self.on_stage_error(stage_id, prev_checkpoint, err)? { return Ok(ctrl) } } } } } -} -fn on_stage_error( - factory: &ProviderFactory, - stage_id: StageId, - prev_checkpoint: Option, - err: StageError, -) -> Result, PipelineError> { - if let StageError::DetachedHead { local_head, header, error } = err { - warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, %error, "Stage encountered detached head"); - - // We unwind because of a detached head. 
- let unwind_to = - local_head.block.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); - Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) - } else if let StageError::Block { block, error } = err { - match error { - BlockErrorKind::Validation(validation_error) => { - error!( - target: "sync::pipeline", - stage = %stage_id, - bad_block = %block.block.number, - "Stage encountered a validation error: {validation_error}" - ); - - // FIXME: When handling errors, we do not commit the database transaction. This - // leads to the Merkle stage not clearing its checkpoint, and restarting from an - // invalid place. - let provider_rw = factory.database_provider_rw()?; - provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; - provider_rw.save_stage_checkpoint( - StageId::MerkleExecute, - prev_checkpoint.unwrap_or_default(), - )?; - - UnifiedStorageWriter::commit(provider_rw)?; - - // We unwind because of a validation error. If the unwind itself - // fails, we bail entirely, - // otherwise we restart the execution loop from the - // beginning. - Ok(Some(ControlFlow::Unwind { - target: prev_checkpoint.unwrap_or_default().block_number, - bad_block: block, - })) + fn on_stage_error( + &mut self, + stage_id: StageId, + prev_checkpoint: Option, + err: StageError, + ) -> Result, PipelineError> { + if let StageError::DetachedHead { local_head, header, error } = err { + warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, %error, "Stage encountered detached head"); + + if let Some(last_detached_head_unwind_target) = self.last_detached_head_unwind_target { + if local_head.block.hash == last_detached_head_unwind_target && + header.block.number == local_head.block.number + 1 + { + self.detached_head_attempts += 1; + } else { + self.detached_head_attempts = 1; + } + } else { + self.detached_head_attempts = 1; } - BlockErrorKind::Execution(execution_error) => { - error!( - target: "sync::pipeline", - stage = %stage_id, - bad_block = %block.block.number, - "Stage encountered an execution error: {execution_error}" - ); - // We unwind because of an execution error. If the unwind itself - // fails, we bail entirely, - // otherwise we restart - // the execution loop from the beginning. - Ok(Some(ControlFlow::Unwind { - target: prev_checkpoint.unwrap_or_default().block_number, - bad_block: block, - })) + // We unwind because of a detached head. + let unwind_to = local_head + .block + .number + .saturating_sub( + BEACON_CONSENSUS_REORG_UNWIND_DEPTH.saturating_mul(self.detached_head_attempts), + ) + .max(1); + + self.last_detached_head_unwind_target = self.provider_factory.block_hash(unwind_to)?; + Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) + } else if let StageError::Block { block, error } = err { + match error { + BlockErrorKind::Validation(validation_error) => { + error!( + target: "sync::pipeline", + stage = %stage_id, + bad_block = %block.block.number, + "Stage encountered a validation error: {validation_error}" + ); + + // FIXME: When handling errors, we do not commit the database transaction. This + // leads to the Merkle stage not clearing its checkpoint, and restarting from an + // invalid place. 
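+ // As a workaround, the `MerkleExecute` checkpoint progress is cleared
+ // and its checkpoint is reset to the previous one in a transaction that
+ // is committed before the unwind below is returned.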
+ let provider_rw = self.provider_factory.database_provider_rw()?; + provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; + provider_rw.save_stage_checkpoint( + StageId::MerkleExecute, + prev_checkpoint.unwrap_or_default(), + )?; + + UnifiedStorageWriter::commit(provider_rw)?; + + // We unwind because of a validation error. If the unwind itself + // fails, we bail entirely, + // otherwise we restart the execution loop from the + // beginning. + Ok(Some(ControlFlow::Unwind { + target: prev_checkpoint.unwrap_or_default().block_number, + bad_block: block, + })) + } + BlockErrorKind::Execution(execution_error) => { + error!( + target: "sync::pipeline", + stage = %stage_id, + bad_block = %block.block.number, + "Stage encountered an execution error: {execution_error}" + ); + + // We unwind because of an execution error. If the unwind itself + // fails, we bail entirely, + // otherwise we restart + // the execution loop from the beginning. + Ok(Some(ControlFlow::Unwind { + target: prev_checkpoint.unwrap_or_default().block_number, + bad_block: block, + })) + } } - } - } else if let StageError::MissingStaticFileData { block, segment } = err { - error!( - target: "sync::pipeline", - stage = %stage_id, - bad_block = %block.block.number, - segment = %segment, - "Stage is missing static file data." - ); + } else if let StageError::MissingStaticFileData { block, segment } = err { + error!( + target: "sync::pipeline", + stage = %stage_id, + bad_block = %block.block.number, + segment = %segment, + "Stage is missing static file data." + ); - Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block })) - } else if err.is_fatal() { - error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); - Err(err.into()) - } else { - // On other errors we assume they are recoverable if we discard the - // transaction and run the stage again. - warn!( - target: "sync::pipeline", - stage = %stage_id, - "Stage encountered a non-fatal error: {err}. Retrying..." - ); - Ok(None) + Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block })) + } else if err.is_fatal() { + error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); + Err(err.into()) + } else { + // On other errors we assume they are recoverable if we discard the + // transaction and run the stage again. + warn!( + target: "sync::pipeline", + stage = %stage_id, + "Stage encountered a non-fatal error: {err}. Retrying..." + ); + Ok(None) + } } } diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index 74c4d0441b0..e390f02e154 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -165,6 +165,11 @@ pub struct ExecOutput { } impl ExecOutput { + /// Mark the stage as not done, checkpointing at the given place. + pub const fn in_progress(checkpoint: StageCheckpoint) -> Self { + Self { checkpoint, done: false } + } + /// Mark the stage as done, checkpointing at the given place. 
    pub const fn done(checkpoint: StageCheckpoint) -> Self {
        Self { checkpoint, done: true }
@@ -271,4 +276,4 @@ pub trait StageExt<Provider>: Stage<Provider> {
     }
 }
 
-impl<Provider, S: Stage<Provider>> StageExt<Provider> for S {}
+impl<Provider, S: Stage<Provider> + ?Sized> StageExt<Provider> for S {}
diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml
index eb3cce423b3..ef8d8f72f28 100644
--- a/crates/stages/stages/Cargo.toml
+++ b/crates/stages/stages/Cargo.toml
@@ -21,6 +21,9 @@ reth-db.workspace = true
 reth-db-api.workspace = true
 reth-etl.workspace = true
 reth-evm = { workspace = true, features = ["metrics"] }
+reth-era-downloader.workspace = true
+reth-era-utils.workspace = true
+reth-era.workspace = true
 reth-exex.workspace = true
 reth-fs-util.workspace = true
 reth-network-p2p.workspace = true
@@ -60,6 +63,7 @@ bincode.workspace = true
 blake3.workspace = true
 reqwest = { workspace = true, default-features = false, features = ["rustls-tls-native-roots", "blocking"] }
 serde = { workspace = true, features = ["derive"] }
+eyre.workspace = true
 
 [dev-dependencies]
 # reth
diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs
index d0e6a2f22c0..2c29bad8710 100644
--- a/crates/stages/stages/src/lib.rs
+++ b/crates/stages/stages/src/lib.rs
@@ -16,7 +16,6 @@
 //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
 //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder;
 //! # use reth_network_p2p::test_utils::{TestBodiesClient, TestHeadersClient};
-//! # use reth_evm_ethereum::execute::EthExecutorProvider;
 //! # use alloy_primitives::B256;
 //! # use reth_chainspec::MAINNET;
 //! # use reth_prune_types::PruneModes;
@@ -47,11 +46,12 @@
 //! # provider_factory.clone()
 //! # );
 //! # let (tip_tx, tip_rx) = watch::channel(B256::default());
-//! # let executor_provider = EthExecutorProvider::mainnet();
+//! # let executor_provider = EthEvmConfig::mainnet();
 //! # let static_file_producer = StaticFileProducer::new(
 //! #     provider_factory.clone(),
 //! #     PruneModes::default()
 //! # );
+//! # let era_import_source = None;
 //! // Create a pipeline that can fully sync
 //! # let pipeline =
 //! Pipeline::<MockNodeTypesWithDB>::builder()
@@ -65,6 +65,7 @@
 //!     executor_provider,
 //!     StageConfig::default(),
 //!     PruneModes::default(),
+//!     era_import_source,
 //! ))
 //!     .build(provider_factory, static_file_producer);
 //! ```
diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs
index 058d172de08..51b99f626ad 100644
--- a/crates/stages/stages/src/sets.rs
+++ b/crates/stages/stages/src/sets.rs
@@ -38,9 +38,10 @@
 //! 
``` use crate::{ stages::{ - AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, PruneSenderRecoveryStage, - PruneStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, + AccountHashingStage, BodyStage, EraImportSource, EraStage, ExecutionStage, FinishStage, + HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, + PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, }, StageSet, StageSetBuilder, }; @@ -115,6 +116,7 @@ where evm_config: E, stages_config: StageConfig, prune_modes: PruneModes, + era_import_source: Option, ) -> Self { Self { online: OnlineStages::new( @@ -123,6 +125,7 @@ where header_downloader, body_downloader, stages_config.clone(), + era_import_source, ), evm_config, consensus, @@ -197,6 +200,8 @@ where body_downloader: B, /// Configuration for each stage in the pipeline stages_config: StageConfig, + /// Optional source of ERA1 files. The `EraStage` does nothing unless this is specified. + era_import_source: Option, } impl OnlineStages @@ -211,8 +216,9 @@ where header_downloader: H, body_downloader: B, stages_config: StageConfig, + era_import_source: Option, ) -> Self { - Self { provider, tip, header_downloader, body_downloader, stages_config } + Self { provider, tip, header_downloader, body_downloader, stages_config, era_import_source } } } @@ -259,9 +265,12 @@ where B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, + EraStage<::Header, ::Body, EraImportSource>: + Stage, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() + .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())) .add_stage(HeaderStage::new( self.provider, self.header_downloader, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index ca3043c3264..e503d8b5d5c 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -65,70 +65,61 @@ impl BodyStage { pub const fn new(downloader: D) -> Self { Self { downloader, buffer: None } } +} - /// Ensures that static files and database are in sync. - fn ensure_consistency( - &self, - provider: &Provider, - unwind_block: Option, - ) -> Result<(), StageError> - where - Provider: DBProvider + BlockReader + StaticFileProviderFactory, - { - // Get id for the next tx_num of zero if there are no transactions. - let next_tx_num = provider - .tx_ref() - .cursor_read::()? - .last()? - .map(|(id, _)| id + 1) - .unwrap_or_default(); - - let static_file_provider = provider.static_file_provider(); - - // Make sure Transactions static file is at the same height. If it's further, this - // input execution was interrupted previously and we need to unwind the static file. - let next_static_file_tx_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .map(|id| id + 1) - .unwrap_or_default(); - - match next_static_file_tx_num.cmp(&next_tx_num) { - // If static files are ahead, we are currently unwinding the stage or we didn't reach - // the database commit in a previous stage run. So, our only solution is to unwind the - // static files and proceed from the database expected height. - Ordering::Greater => { - let highest_db_block = - provider.tx_ref().entries::()? 
as u64; - let mut static_file_producer = - static_file_provider.latest_writer(StaticFileSegment::Transactions)?; - static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; - // Since this is a database <-> static file inconsistency, we commit the change - // straight away. - static_file_producer.commit()?; - } - // If static files are behind, then there was some corruption or loss of files. This - // error will trigger an unwind, that will bring the database to the same height as the - // static files. - Ordering::Less => { - // If we are already in the process of unwind, this might be fine because we will - // fix the inconsistency right away. - if let Some(unwind_to) = unwind_block { - let next_tx_num_after_unwind = provider - .block_body_indices(unwind_to)? - .map(|b| b.next_tx_num()) - .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; - - // This means we need a deeper unwind. - if next_tx_num_after_unwind > next_static_file_tx_num { - return Err(missing_static_data_error( - next_static_file_tx_num.saturating_sub(1), - &static_file_provider, - provider, - StaticFileSegment::Transactions, - )?) - } - } else { +/// Ensures that static files and database are in sync. +pub(crate) fn ensure_consistency( + provider: &Provider, + unwind_block: Option, +) -> Result<(), StageError> +where + Provider: DBProvider + BlockReader + StaticFileProviderFactory, +{ + // Get id for the next tx_num of zero if there are no transactions. + let next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? + .map(|(id, _)| id + 1) + .unwrap_or_default(); + + let static_file_provider = provider.static_file_provider(); + + // Make sure Transactions static file is at the same height. If it's further, this + // input execution was interrupted previously and we need to unwind the static file. + let next_static_file_tx_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_tx_num.cmp(&next_tx_num) { + // If static files are ahead, we are currently unwinding the stage or we didn't reach + // the database commit in a previous stage run. So, our only solution is to unwind the + // static files and proceed from the database expected height. + Ordering::Greater => { + let highest_db_block = provider.tx_ref().entries::()? as u64; + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind, that will bring the database to the same height as the + // static files. + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_block { + let next_tx_num_after_unwind = provider + .block_body_indices(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + // This means we need a deeper unwind. 
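+                // The requested unwind target still expects transactions that the static
+                // files no longer contain, so consistency cannot be restored at that height.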
+ if next_tx_num_after_unwind > next_static_file_tx_num { return Err(missing_static_data_error( next_static_file_tx_num.saturating_sub(1), &static_file_provider, @@ -136,12 +127,19 @@ impl BodyStage { StaticFileSegment::Transactions, )?) } + } else { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) } - Ordering::Equal => {} } - - Ok(()) + Ordering::Equal => {} } + + Ok(()) } impl Stage for BodyStage @@ -194,7 +192,7 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - self.ensure_consistency(provider, None)?; + ensure_consistency(provider, None)?; debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync"); @@ -231,7 +229,7 @@ where ) -> Result { self.buffer.take(); - self.ensure_consistency(provider, Some(input.unwind_to))?; + ensure_consistency(provider, Some(input.unwind_to))?; provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?; Ok(UnwindOutput { diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs new file mode 100644 index 00000000000..ea0ca7a5cd0 --- /dev/null +++ b/crates/stages/stages/src/stages/era.rs @@ -0,0 +1,630 @@ +use crate::{StageCheckpoint, StageId}; +use alloy_primitives::{BlockHash, BlockNumber}; +use futures_util::{Stream, StreamExt}; +use reqwest::{Client, Url}; +use reth_config::config::EtlConfig; +use reth_db_api::{table::Value, transaction::DbTxMut}; +use reth_era::era1_file::Era1Reader; +use reth_era_downloader::{read_dir, EraClient, EraMeta, EraStream, EraStreamConfig}; +use reth_era_utils as era; +use reth_etl::Collector; +use reth_primitives_traits::{FullBlockBody, FullBlockHeader, NodePrimitives}; +use reth_provider::{ + BlockReader, BlockWriter, DBProvider, HeaderProvider, StageCheckpointWriter, + StaticFileProviderFactory, StaticFileWriter, +}; +use reth_stages_api::{ + CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, HeadersCheckpoint, Stage, + StageError, UnwindInput, UnwindOutput, +}; +use reth_static_file_types::StaticFileSegment; +use reth_storage_errors::ProviderError; +use std::{ + fmt::{Debug, Formatter}, + iter, + path::Path, + task::{ready, Context, Poll}, +}; + +type Item = + Box> + Send + Sync + Unpin>; +type ThreadSafeEraStream = + Box>> + Send + Sync + Unpin>; + +/// The [ERA1](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md) +/// pre-merge history stage. +/// +/// Imports block headers and bodies from genesis up to the last pre-merge block. Receipts are +/// generated by execution. Execution is not done in this stage. +pub struct EraStage { + /// The `source` creates `stream`. + source: Option, + /// A map of block hash to block height collected when processing headers and inserted into + /// database afterward. + hash_collector: Collector, + /// Last extracted iterator of block `Header` and `Body` pairs. + item: Option>, + /// A stream of [`Item`]s, i.e. iterators over block `Header` and `Body` pairs. 
+ stream: Option>, +} + +trait EraStreamFactory { + fn create(self, input: ExecInput) -> Result, StageError>; +} + +impl EraStreamFactory for EraImportSource +where + Header: FullBlockHeader + Value, + Body: FullBlockBody, +{ + fn create(self, input: ExecInput) -> Result, StageError> { + match self { + Self::Path(path) => Self::convert( + read_dir(path, input.next_block()).map_err(|e| StageError::Fatal(e.into()))?, + ), + Self::Url(url, folder) => { + let _ = reth_fs_util::create_dir_all(&folder); + let client = EraClient::new(Client::new(), url, folder); + + Self::convert(EraStream::new( + client.start_from(input.next_block()), + EraStreamConfig::default().start_from(input.next_block()), + )) + } + } + } +} + +impl EraImportSource { + fn convert( + stream: impl Stream> + + Send + + Sync + + 'static + + Unpin, + ) -> Result, StageError> + where + Header: FullBlockHeader + Value, + Body: FullBlockBody, + { + Ok(Box::new(Box::pin(stream.map(|meta| { + meta.and_then(|meta| { + let file = reth_fs_util::open(meta.path())?; + let reader = Era1Reader::new(file); + let iter = reader.iter(); + let iter = iter.map(era::decode); + let iter = iter.chain( + iter::once_with(move || match meta.mark_as_processed() { + Ok(..) => None, + Err(e) => Some(Err(e)), + }) + .flatten(), + ); + + Ok(Box::new(iter) as Item) + }) + })))) + } +} + +impl Debug for EraStage { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EraStage") + .field("source", &self.source) + .field("hash_collector", &self.hash_collector) + .field("item", &self.item.is_some()) + .field("stream", &"dyn Stream") + .finish() + } +} + +impl EraStage { + /// Creates a new [`EraStage`]. + pub fn new(source: Option, etl_config: EtlConfig) -> Self { + Self { + source, + item: None, + stream: None, + hash_collector: Collector::new(etl_config.file_size, etl_config.dir), + } + } +} + +impl Stage for EraStage +where + Provider: DBProvider + + StaticFileProviderFactory + + BlockWriter + + BlockReader + + StageCheckpointWriter, + F: EraStreamFactory + Send + Sync + Clone, + N: NodePrimitives, +{ + fn id(&self) -> StageId { + StageId::Era + } + + fn poll_execute_ready( + &mut self, + cx: &mut Context<'_>, + input: ExecInput, + ) -> Poll> { + if input.target_reached() || self.item.is_some() { + return Poll::Ready(Ok(())); + } + + if self.stream.is_none() { + if let Some(source) = self.source.clone() { + self.stream.replace(source.create(input)?); + } + } + if let Some(stream) = &mut self.stream { + if let Some(next) = ready!(stream.poll_next_unpin(cx)) + .transpose() + .map_err(|e| StageError::Fatal(e.into()))? + { + self.item.replace(next); + } + } + + Poll::Ready(Ok(())) + } + + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { + let height = if let Some(era) = self.item.take() { + let static_file_provider = provider.static_file_provider(); + + // Consistency check of expected headers in static files vs DB is done on + // provider::sync_gap when poll_execute_ready is polled. + let last_header_number = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default(); + + // Find the latest total difficulty + let mut td = static_file_provider + .header_td_by_number(last_header_number)? 
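+                // Keep a running total difficulty: it is passed as `&mut td` to
+                // `era::process_iter` below, which accumulates it as headers are written.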
+                .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?;
+
+            // Although headers were downloaded in reverse order, the collector iterates them in
+            // ascending order
+            let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
+
+            let height = era::process_iter(
+                era,
+                &mut writer,
+                provider,
+                &mut self.hash_collector,
+                &mut td,
+                last_header_number..=input.target(),
+            )
+            .map_err(|e| StageError::Fatal(e.into()))?;
+
+            if !self.hash_collector.is_empty() {
+                era::build_index(provider, &mut self.hash_collector)
+                    .map_err(|e| StageError::Recoverable(e.into()))?;
+                self.hash_collector.clear();
+            }
+
+            provider.save_stage_checkpoint(
+                StageId::Headers,
+                StageCheckpoint::new(height).with_headers_stage_checkpoint(HeadersCheckpoint {
+                    block_range: CheckpointBlockRange {
+                        from: input.checkpoint().block_number,
+                        to: height,
+                    },
+                    progress: EntitiesCheckpoint { processed: height, total: input.target() },
+                }),
+            )?;
+            provider.save_stage_checkpoint(
+                StageId::Bodies,
+                StageCheckpoint::new(height).with_entities_stage_checkpoint(EntitiesCheckpoint {
+                    processed: height,
+                    total: input.target(),
+                }),
+            )?;
+
+            height
+        } else {
+            input.target()
+        };
+
+        Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() })
+    }
+
+    fn unwind(
+        &mut self,
+        _provider: &Provider,
+        input: UnwindInput,
+    ) -> Result<UnwindOutput, StageError> {
+        Ok(UnwindOutput { checkpoint: input.checkpoint.with_block_number(input.unwind_to) })
+    }
+}
+
+/// Describes where to get the era files from.
+#[derive(Debug, Clone)]
+pub enum EraImportSource {
+    /// Remote HTTP accessible host.
+    Url(Url, Box<Path>),
+    /// Local directory.
+    Path(Box<Path>),
+}
+
+impl EraImportSource {
+    /// Maybe constructs a new `EraImportSource` depending on the arguments.
+    ///
+    /// Only one of `url` or `path` should be provided; upholding this invariant is left to the
+    /// caller, which is why both parameters are accepted here.
+    ///
+    /// # Arguments
+    /// * The `path` uses a directory as the import source. It and its contents must be readable.
+    /// * The `url` uses an HTTP client to list and download files.
+    /// * The `default` gives the default [`Url`] if none of the previous parameters are provided.
+    /// * For any [`Url`] the `folder` is used as the download directory for storing files
+    ///   temporarily. It and its contents must be readable and writable.
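+    ///
+    /// For example, `maybe_new(Some(path), Some(url), ..)` resolves to `Self::Path(path)`,
+    /// `maybe_new(None, Some(url), ..)` to `Self::Url(url, folder())`, and with neither given
+    /// the result falls back to `default()`, which may itself yield `None`.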
+ pub fn maybe_new( + path: Option>, + url: Option, + default: impl FnOnce() -> Option, + folder: impl FnOnce() -> Box, + ) -> Option { + path.map(Self::Path).or_else(|| url.or_else(default).map(|url| Self::Url(url, folder()))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, + }; + use alloy_primitives::B256; + use assert_matches::assert_matches; + use reth_db_api::tables; + use reth_provider::BlockHashReader; + use reth_testing_utils::generators::{self, random_header}; + use test_runner::EraTestRunner; + + #[tokio::test] + async fn test_era_range_ends_below_target() { + let era_cap = 2; + let target = 20000; + + let mut runner = EraTestRunner::default(); + + let input = ExecInput { target: Some(era_cap), checkpoint: None }; + runner.seed_execution(input).unwrap(); + + let input = ExecInput { target: Some(target), checkpoint: None }; + let output = runner.execute(input).await.unwrap(); + + runner.commit(); + + assert_matches!( + output, + Ok(ExecOutput { + checkpoint: StageCheckpoint { block_number, stage_checkpoint: None }, + done: false + }) if block_number == era_cap + ); + + let output = output.unwrap(); + let validation_output = runner.validate_execution(input, Some(output.clone())); + + assert_matches!(validation_output, Ok(())); + + runner.take_responses(); + + let input = ExecInput { target: Some(target), checkpoint: Some(output.checkpoint) }; + let output = runner.execute(input).await.unwrap(); + + runner.commit(); + + assert_matches!( + output, + Ok(ExecOutput { + checkpoint: StageCheckpoint { block_number, stage_checkpoint: None }, + done: true + }) if block_number == target + ); + + let validation_output = runner.validate_execution(input, output.ok()); + + assert_matches!(validation_output, Ok(())); + } + + mod test_runner { + use super::*; + use crate::test_utils::{TestRunnerError, TestStageDB}; + use alloy_consensus::{BlockBody, Header}; + use futures_util::stream; + use reth_db_api::{ + cursor::DbCursorRO, + models::{StoredBlockBodyIndices, StoredBlockOmmers}, + transaction::DbTx, + }; + use reth_ethereum_primitives::TransactionSigned; + use reth_primitives_traits::{SealedBlock, SealedHeader}; + use reth_provider::{BlockNumReader, TransactionsProvider}; + use reth_testing_utils::generators::{ + random_block_range, random_signed_tx, BlockRangeParams, + }; + use tokio::sync::watch; + + pub(crate) struct EraTestRunner { + channel: (watch::Sender, watch::Receiver), + db: TestStageDB, + responses: Option)>>, + } + + impl Default for EraTestRunner { + fn default() -> Self { + Self { + channel: watch::channel(B256::ZERO), + db: TestStageDB::default(), + responses: Default::default(), + } + } + } + + impl StageTestRunner for EraTestRunner { + type S = EraStage, StubResponses>; + + fn db(&self) -> &TestStageDB { + &self.db + } + + fn stage(&self) -> Self::S { + EraStage::new(self.responses.clone().map(StubResponses), EtlConfig::default()) + } + } + + impl ExecuteStageTestRunner for EraTestRunner { + type Seed = Vec>; + + fn seed_execution(&mut self, input: ExecInput) -> Result { + let start = input.checkpoint().block_number; + let end = input.target(); + + let static_file_provider = self.db.factory.static_file_provider(); + + let mut rng = generators::rng(); + + // Static files do not support gaps in headers, so we need to generate 0 to end + let blocks = random_block_range( + &mut rng, + 0..=end, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: 0..2, + 
..Default::default() + }, + ); + self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; + if let Some(progress) = blocks.get(start as usize) { + // Insert last progress data + { + let tx = self.db.factory.provider_rw()?.into_tx(); + let mut static_file_producer = static_file_provider + .get_writer(start, StaticFileSegment::Transactions)?; + + let body = StoredBlockBodyIndices { + first_tx_num: 0, + tx_count: progress.transaction_count() as u64, + }; + + static_file_producer.set_block_range(0..=progress.number); + + body.tx_num_range().try_for_each(|tx_num| { + let transaction = random_signed_tx(&mut rng); + static_file_producer.append_transaction(tx_num, &transaction).map(drop) + })?; + + if body.tx_count != 0 { + tx.put::( + body.last_tx_num(), + progress.number, + )?; + } + + tx.put::(progress.number, body)?; + + if !progress.ommers_hash_is_empty() { + tx.put::( + progress.number, + StoredBlockOmmers { ommers: progress.body().ommers.clone() }, + )?; + } + + static_file_producer.commit()?; + tx.commit()?; + } + } + self.responses.replace( + blocks.iter().map(|v| (v.header().clone(), v.body().clone())).collect(), + ); + Ok(blocks) + } + + /// Validate stored headers and bodies + fn validate_execution( + &self, + input: ExecInput, + output: Option, + ) -> Result<(), TestRunnerError> { + let initial_checkpoint = input.checkpoint().block_number; + match output { + Some(output) if output.checkpoint.block_number > initial_checkpoint => { + let provider = self.db.factory.provider()?; + let mut td = provider + .header_td_by_number(initial_checkpoint.saturating_sub(1))? + .unwrap_or_default(); + + for block_num in initial_checkpoint.. + output + .checkpoint + .block_number + .min(self.responses.as_ref().map(|v| v.len()).unwrap_or_default() + as BlockNumber) + { + // look up the header hash + let hash = provider.block_hash(block_num)?.expect("no header hash"); + + // validate the header number + assert_eq!(provider.block_number(hash)?, Some(block_num)); + + // validate the header + let header = provider.header_by_number(block_num)?; + assert!(header.is_some()); + let header = SealedHeader::seal_slow(header.unwrap()); + assert_eq!(header.hash(), hash); + + // validate the header total difficulty + td += header.difficulty; + assert_eq!(provider.header_td_by_number(block_num)?, Some(td)); + } + + self.validate_db_blocks( + output.checkpoint.block_number, + output.checkpoint.block_number, + )?; + } + _ => self.check_no_header_entry_above(initial_checkpoint)?, + }; + Ok(()) + } + + async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> { + let tip = if headers.is_empty() { + let tip = random_header(&mut generators::rng(), 0, None); + self.db.insert_headers(iter::once(&tip))?; + tip.hash() + } else { + headers.last().unwrap().hash() + }; + self.send_tip(tip); + Ok(()) + } + } + + impl UnwindStageTestRunner for EraTestRunner { + fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { + Ok(()) + } + } + + impl EraTestRunner { + pub(crate) fn check_no_header_entry_above( + &self, + block: BlockNumber, + ) -> Result<(), TestRunnerError> { + self.db + .ensure_no_entry_above_by_value::(block, |val| val)?; + self.db.ensure_no_entry_above::(block, |key| key)?; + self.db.ensure_no_entry_above::(block, |key| key)?; + self.db.ensure_no_entry_above::( + block, + |num| num, + )?; + Ok(()) + } + + pub(crate) fn send_tip(&self, tip: B256) { + self.channel.0.send(tip).expect("failed to send tip"); + } + + /// Validate that the inserted 
block data is valid + pub(crate) fn validate_db_blocks( + &self, + prev_progress: BlockNumber, + highest_block: BlockNumber, + ) -> Result<(), TestRunnerError> { + let static_file_provider = self.db.factory.static_file_provider(); + + self.db.query(|tx| { + // Acquire cursors on body related tables + let mut bodies_cursor = tx.cursor_read::()?; + let mut ommers_cursor = tx.cursor_read::()?; + let mut tx_block_cursor = tx.cursor_read::()?; + + let first_body_key = match bodies_cursor.first()? { + Some((key, _)) => key, + None => return Ok(()), + }; + + let mut prev_number: Option = None; + + + for entry in bodies_cursor.walk(Some(first_body_key))? { + let (number, body) = entry?; + + // Validate sequentiality only after prev progress, + // since the data before is mocked and can contain gaps + if number > prev_progress { + if let Some(prev_key) = prev_number { + assert_eq!(prev_key + 1, number, "Body entries must be sequential"); + } + } + + // Validate that the current entry is below or equals to the highest allowed block + assert!( + number <= highest_block, + "We wrote a block body outside of our synced range. Found block with number {number}, highest block according to stage is {highest_block}", + ); + + let header = static_file_provider.header_by_number(number)?.expect("to be present"); + // Validate that ommers exist if any + let stored_ommers = ommers_cursor.seek_exact(number)?; + if header.ommers_hash_is_empty() { + assert!(stored_ommers.is_none(), "Unexpected ommers entry"); + } else { + assert!(stored_ommers.is_some(), "Missing ommers entry"); + } + + let tx_block_id = tx_block_cursor.seek_exact(body.last_tx_num())?.map(|(_,b)| b); + if body.tx_count == 0 { + assert_ne!(tx_block_id,Some(number)); + } else { + assert_eq!(tx_block_id, Some(number)); + } + + for tx_id in body.tx_num_range() { + assert!(static_file_provider.transaction_by_id(tx_id)?.is_some(), "Transaction is missing."); + } + + prev_number = Some(number); + } + Ok(()) + })?; + Ok(()) + } + + pub(crate) fn take_responses(&mut self) { + self.responses.take(); + } + + pub(crate) fn commit(&self) { + self.db.factory.static_file_provider().commit().unwrap(); + } + } + + #[derive(Clone)] + pub(crate) struct StubResponses(Vec<(Header, BlockBody)>); + + impl EraStreamFactory> for StubResponses { + fn create( + self, + _input: ExecInput, + ) -> Result>, StageError> + { + let stream = stream::iter(vec![self.0]); + + Ok(Box::new(Box::pin(stream.map(|meta| { + Ok(Box::new(meta.into_iter().map(Ok)) + as Item>) + })))) + } + } + } + + stage_test_suite!(EraTestRunner, era); +} diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 15992c232cb..c622e743c14 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -214,7 +214,6 @@ where let target = SyncTarget::Tip(*self.tip.borrow()); let gap = HeaderSyncGap { local_head, target }; let tip = gap.target.tip(); - self.sync_gap = Some(gap.clone()); // Nothing to sync if gap.is_closed() { @@ -225,6 +224,7 @@ where "Target block already reached" ); self.is_etl_ready = true; + self.sync_gap = Some(gap); return Poll::Ready(Ok(())) } @@ -232,7 +232,10 @@ where let local_head_number = gap.local_head.number(); // let the downloader know what to sync - self.downloader.update_sync_gap(gap.local_head, gap.target); + if self.sync_gap != Some(gap.clone()) { + self.sync_gap = Some(gap.clone()); + self.downloader.update_sync_gap(gap.local_head, gap.target); + } // We only want to stop once we have 
all the headers on ETL filespace (disk). loop { @@ -263,13 +266,17 @@ where } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", %error, "Cannot attach header to head"); + self.sync_gap = None; return Poll::Ready(Err(StageError::DetachedHead { local_head: Box::new(local_head.block_with_parent()), header: Box::new(header.block_with_parent()), error, })) } - None => return Poll::Ready(Err(StageError::ChannelClosed)), + None => { + self.sync_gap = None; + return Poll::Ready(Err(StageError::ChannelClosed)) + } } } } @@ -279,7 +286,7 @@ where fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let current_checkpoint = input.checkpoint(); - if self.sync_gap.as_ref().ok_or(StageError::MissingSyncGap)?.is_closed() { + if self.sync_gap.take().ok_or(StageError::MissingSyncGap)?.is_closed() { self.is_etl_ready = false; return Ok(ExecOutput::done(current_checkpoint)) } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 8977fa8a10b..e1b952db79f 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -25,6 +25,7 @@ mod sender_recovery; mod tx_lookup; pub use bodies::*; +pub use era::*; pub use execution::*; pub use finish::*; pub use hashing_account::*; @@ -38,7 +39,9 @@ pub use s3::*; pub use sender_recovery::*; pub use tx_lookup::*; +mod era; mod utils; + use utils::*; #[cfg(test)] @@ -61,7 +64,7 @@ mod tests { }; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_primitives::Block; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm_ethereum::EthEvmConfig; use reth_exex::ExExManagerHandle; use reth_primitives_traits::{Account, Bytecode, SealedBlock}; use reth_provider::{ @@ -154,7 +157,7 @@ mod tests { // Check execution and create receipts and changesets according to the pruning // configuration let mut execution_stage = ExecutionStage::new( - EthExecutorProvider::ethereum(Arc::new( + EthEvmConfig::ethereum(Arc::new( ChainSpecBuilder::mainnet().berlin_activated().build(), )), Arc::new(EthBeaconConsensus::new(Arc::new( diff --git a/crates/stages/stages/src/stages/s3/mod.rs b/crates/stages/stages/src/stages/s3/mod.rs index 1c656cc1ecf..b5904f1c2ca 100644 --- a/crates/stages/stages/src/stages/s3/mod.rs +++ b/crates/stages/stages/src/stages/s3/mod.rs @@ -224,6 +224,7 @@ mod tests { // stage_test_suite_ext!(S3TestRunner, s3); #[derive(Default)] + #[allow(unused)] struct S3TestRunner { db: TestStageDB, } diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index e1d466eff32..86dd9ced5c7 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -8,6 +8,7 @@ pub enum StageId { note = "Static Files are generated outside of the pipeline and do not require a separate stage" )] StaticFile, + Era, Headers, Bodies, SenderRecovery, @@ -28,7 +29,8 @@ pub enum StageId { impl StageId { /// All supported Stages - pub const ALL: [Self; 14] = [ + pub const ALL: [Self; 15] = [ + Self::Era, Self::Headers, Self::Bodies, Self::SenderRecovery, @@ -63,6 +65,7 @@ impl StageId { match self { #[expect(deprecated)] Self::StaticFile => "StaticFile", + Self::Era => "Era", Self::Headers => "Headers", Self::Bodies => "Bodies", Self::SenderRecovery => "SenderRecovery", @@ -83,7 +86,7 @@ impl StageId { /// Returns true if it's a downloading stage [`StageId::Headers`] or [`StageId::Bodies`] pub const fn is_downloading_stage(&self) -> bool { - matches!(self, Self::Headers | 
Self::Bodies) + matches!(self, Self::Era | Self::Headers | Self::Bodies) } /// Returns `true` if it's [`TransactionLookup`](StageId::TransactionLookup) stage. @@ -109,6 +112,7 @@ mod tests { #[test] fn stage_id_as_string() { + assert_eq!(StageId::Era.to_string(), "Era"); assert_eq!(StageId::Headers.to_string(), "Headers"); assert_eq!(StageId::Bodies.to_string(), "Bodies"); assert_eq!(StageId::SenderRecovery.to_string(), "SenderRecovery"); @@ -129,14 +133,8 @@ mod tests { fn is_downloading_stage() { assert!(StageId::Headers.is_downloading_stage()); assert!(StageId::Bodies.is_downloading_stage()); + assert!(StageId::Era.is_downloading_stage()); assert!(!StageId::Execution.is_downloading_stage()); } - - // Multiple places around the codebase assume headers is the first stage. - // Feel free to remove this test if the assumption changes. - #[test] - fn stage_all_headers_first() { - assert_eq!(*StageId::ALL.first().unwrap(), StageId::Headers); - } } diff --git a/crates/storage/codecs/src/alloy/transaction/ethereum.rs b/crates/storage/codecs/src/alloy/transaction/ethereum.rs index 14d51b866fb..799fcf7861e 100644 --- a/crates/storage/codecs/src/alloy/transaction/ethereum.rs +++ b/crates/storage/codecs/src/alloy/transaction/ethereum.rs @@ -112,7 +112,8 @@ impl Envelope } } -pub(super) trait CompactEnvelope: Sized { +/// Wraps the [`Compact`] trait. +pub trait CompactEnvelope: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. fn to_compact(&self, buf: &mut B) -> usize where diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index 47881b6f87a..f841ff24f17 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -56,7 +56,7 @@ where cond_mod!(eip1559, eip2930, eip4844, eip7702, legacy, txtype); mod ethereum; -pub use ethereum::{Envelope, FromTxCompact, ToTxCompact}; +pub use ethereum::{CompactEnvelope, Envelope, FromTxCompact, ToTxCompact}; #[cfg(all(feature = "test-utils", feature = "op"))] pub mod optimism; diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index 331ab41ed13..63a586b1ef1 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -405,8 +405,7 @@ tables! { /// the shard that equal or more than asked. For example: /// * For N=50 we would get first shard. /// * for N=150 we would get second shard. - /// * If max block number is 200 and we ask for N=250 we would fetch last shard and - /// know that needed entry is in `AccountPlainState`. + /// * If max block number is 200 and we ask for N=250 we would fetch last shard and know that needed entry is in `AccountPlainState`. /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` @@ -428,8 +427,7 @@ tables! { /// the shard that equal or more than asked. For example: /// * For N=50 we would get first shard. /// * for N=150 we would get second shard. - /// * If max block number is 200 and we ask for N=250 we would fetch last shard and - /// know that needed entry is in `StoragePlainState`. + /// * If max block number is 200 and we ask for N=250 we would fetch last shard and know that needed entry is in `StoragePlainState`. /// * If there were no shard we would get `None` entry or entry of different storage key. 
 ///
 /// Code example can be found in `reth_provider::HistoricalStateProviderRef`
diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs
index d72032e322b..c27587690ba 100644
--- a/crates/storage/errors/src/provider.rs
+++ b/crates/storage/errors/src/provider.rs
@@ -134,6 +134,9 @@ pub enum ProviderError {
     /// Received invalid output from configured storage implementation.
     #[error("received invalid output from storage")]
     InvalidStorageOutput,
+    /// Missing trie updates.
+    #[error("missing trie updates for block {0}")]
+    MissingTrieUpdates(B256),
     /// Any other error type wrapped into a cloneable [`AnyError`].
     #[error(transparent)]
     Other(#[from] AnyError),
diff --git a/crates/storage/libmdbx-rs/README.md b/crates/storage/libmdbx-rs/README.md
index 0ead0242b8f..df115ee69a0 100644
--- a/crates/storage/libmdbx-rs/README.md
+++ b/crates/storage/libmdbx-rs/README.md
@@ -12,8 +12,8 @@ To update the libmdbx version you must clone it and copy the `dist/` folder in `
 Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting).
 
 ```bash
-# clone libmmdbx to a repository outside at specific tag
-git clone https://gitflic.ru/project/erthink/libmdbx.git ../libmdbx --branch v0.7.0
+# clone libmdbx to a repository outside at specific tag
+git clone https://github.com/erthink/libmdbx.git ../libmdbx --branch v0.7.0
 make -C ../libmdbx dist
 
 # copy the `libmdbx/dist/` folder just created into `mdbx-sys/libmdbx`
diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs
index 4f2cc7c5448..300dfa60c70 100644
--- a/crates/storage/libmdbx-rs/src/lib.rs
+++ b/crates/storage/libmdbx-rs/src/lib.rs
@@ -43,7 +43,7 @@ mod test_utils {
     use tempfile::tempdir;
 
     /// Regression test for <https://github.com/danburkert/lmdb-rs/issues/21>.
-    /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer
+    /// This test reliably segfaults when run against lmdb compiled with opt level -O3 and newer
     /// GCC compilers.
#[test] fn issue_21_regression() { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index bb8fd423dc7..88cb18ac445 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -7,7 +7,7 @@ use crate::{ DatabaseProviderFactory, FullProvider, HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, }; use alloy_consensus::{transaction::TransactionMeta, Header}; use alloy_eips::{ @@ -36,8 +36,8 @@ use reth_primitives_traits::{ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, OmmersProvider, - StateCommitmentProvider, StorageChangeSetReader, + BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, StateCommitmentProvider, + StorageChangeSetReader, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; @@ -295,11 +295,7 @@ impl BlockReader for BlockchainProvider { self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult>> { - Ok(self.canonical_in_memory_state.pending_block()) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_recovered_block()) } @@ -455,22 +451,6 @@ impl ReceiptProviderIdExt for BlockchainProvider { } } -impl WithdrawalsProvider for BlockchainProvider { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.consistent_provider()?.withdrawals_by_block(id, timestamp) - } -} - -impl OmmersProvider for BlockchainProvider { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.consistent_provider()?.ommers(id) - } -} - impl BlockBodyIndicesProvider for BlockchainProvider { fn block_body_indices( &self, @@ -696,10 +676,6 @@ where fn header_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.header_by_id(id) } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - self.consistent_provider()?.ommers_by_id(id) - } } impl CanonStateSubscriptions for BlockchainProvider { @@ -779,13 +755,14 @@ mod tests { BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; - use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; + use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, NewCanonicalChain, + CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, + NewCanonicalChain, }; use reth_chainspec::{ ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, @@ -806,8 +783,8 @@ mod tests { use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, 
BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, - OmmersProvider, ReceiptProvider, ReceiptProviderIdExt, StateProviderFactory, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ReceiptProvider, ReceiptProviderIdExt, StateProviderFactory, TransactionVariant, + TransactionsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, @@ -936,7 +913,7 @@ mod tests { Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), execution_outcome.into(), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), ) }) .collect(), @@ -1068,7 +1045,7 @@ mod tests { )), Default::default(), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), )], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1106,7 +1083,7 @@ mod tests { execution_output: Default::default(), hashed_state: Default::default(), }, - trie: Default::default(), + trie: ExecutedTrieUpdates::empty(), }); // Now the last block should be found in memory @@ -1164,7 +1141,7 @@ mod tests { )), Default::default(), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), )], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1220,14 +1197,13 @@ mod tests { execution_output: Default::default(), hashed_state: Default::default(), }, - trie: Default::default(), + trie: ExecutedTrieUpdates::empty(), }); // Assertions related to the pending block - assert_eq!(provider.pending_block()?, Some(block.clone())); assert_eq!( - provider.pending_block_with_senders()?, + provider.pending_block()?, Some(RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap())) ); @@ -1236,43 +1212,6 @@ mod tests { Ok(()) } - #[test] - fn test_block_reader_ommers() -> eyre::Result<()> { - // Create a new provider - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - let first_in_mem_block = in_memory_blocks.first().unwrap(); - - // If the block is after the Merge, we should have an empty ommers list - assert_eq!( - provider.ommers( - (provider.chain_spec().paris_block_and_final_difficulty.unwrap().0 + 2).into() - )?, - Some(vec![]) - ); - - // First in memory block ommers should be found - assert_eq!( - provider.ommers(first_in_mem_block.number.into())?, - Some(first_in_mem_block.body().ommers.clone()) - ); - assert_eq!( - provider.ommers(first_in_mem_block.hash().into())?, - Some(first_in_mem_block.body().ommers.clone()) - ); - - // A random hash should return None as the block number is not found - assert_eq!(provider.ommers(B256::random().into())?, None); - - Ok(()) - } - #[test] fn test_block_body_indices() -> eyre::Result<()> { // Create a new provider @@ -1300,7 +1239,7 @@ mod tests { )), Default::default(), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), )], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1441,50 +1380,6 @@ mod tests { Ok(()) } - #[test] - fn test_withdrawals_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let (provider, database_blocks, in_memory_blocks, _) = - provider_with_chain_spec_and_random_blocks( - &mut rng, - chain_spec.clone(), - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { withdrawals_count: Some(1..3), 
..Default::default() }, - )?; - let blocks = [database_blocks, in_memory_blocks].concat(); - - let shainghai_timestamp = - chain_spec.hardforks.fork(EthereumHardfork::Shanghai).as_timestamp().unwrap(); - - assert_eq!( - provider - .withdrawals_by_block( - alloy_eips::BlockHashOrNumber::Number(15), - shainghai_timestamp - ) - .expect("could not call withdrawals by block"), - Some(Withdrawals::new(vec![])), - "Expected withdrawals_by_block to return empty list if block does not exist" - ); - - for block in blocks { - assert_eq!( - provider - .withdrawals_by_block( - alloy_eips::BlockHashOrNumber::Number(block.number), - shainghai_timestamp - )? - .unwrap(), - block.body().withdrawals.clone().unwrap(), - "Expected withdrawals_by_block to return correct withdrawals" - ); - } - - Ok(()) - } - #[test] fn test_block_num_reader() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -1660,46 +1555,6 @@ mod tests { Ok(()) } - #[test] - fn test_block_reader_id_ext_ommers_by_id() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - - let block_number = database_block.number; - let block_hash = database_block.hash(); - - assert_eq!( - provider.ommers_by_id(block_number.into()).unwrap().unwrap_or_default(), - database_block.body().ommers - ); - assert_eq!( - provider.ommers_by_id(block_hash.into()).unwrap().unwrap_or_default(), - database_block.body().ommers - ); - - let block_number = in_memory_block.number; - let block_hash = in_memory_block.hash(); - - assert_eq!( - provider.ommers_by_id(block_number.into()).unwrap().unwrap_or_default(), - in_memory_block.body().ommers - ); - assert_eq!( - provider.ommers_by_id(block_hash.into()).unwrap().unwrap_or_default(), - in_memory_block.body().ommers - ); - - Ok(()) - } - #[test] fn test_receipt_provider_id_ext_receipts_by_block_id() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -1874,7 +1729,7 @@ mod tests { ..Default::default() }), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), ) }) .unwrap()], @@ -2003,7 +1858,7 @@ mod tests { execution_output: Default::default(), hashed_state: Default::default(), }, - trie: Default::default(), + trie: ExecutedTrieUpdates::empty(), }, ); @@ -2100,7 +1955,7 @@ mod tests { execution_output: Default::default(), hashed_state: Default::default(), }, - trie: Default::default(), + trie: ExecutedTrieUpdates::empty(), }); // Set the safe block in memory diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index ced92579471..b4fcfa6c7ff 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -4,19 +4,19 @@ use crate::{ BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + TransactionsProvider, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{ - eip2718::Encodable2718, eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumHash, - BlockNumberOrTag, HashOrNumber, + 
eip2718::Encodable2718, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, + HashOrNumber, }; use alloy_primitives::{ map::{hash_map, HashMap}, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, }; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; -use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_chainspec::ChainInfo; use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; @@ -26,8 +26,8 @@ use reth_primitives_traits::{ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - BlockBodyIndicesProvider, DatabaseProviderFactory, NodePrimitivesProvider, OmmersProvider, - StateProvider, StorageChangeSetReader, + BlockBodyIndicesProvider, DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, + StorageChangeSetReader, }; use reth_storage_errors::provider::ProviderResult; use revm_database::states::PlainStorageRevert; @@ -825,11 +825,7 @@ impl BlockReader for ConsistentProvider { ) } - fn pending_block(&self) -> ProviderResult>> { - Ok(self.canonical_in_memory_state.pending_block()) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_recovered_block()) } @@ -1145,42 +1141,6 @@ impl ReceiptProviderIdExt for ConsistentProvider { } } -impl WithdrawalsProvider for ConsistentProvider { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| { - Ok(block_state.block_ref().recovered_block().body().withdrawals().cloned()) - }, - ) - } -} - -impl OmmersProvider for ConsistentProvider { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.ommers(id), - |block_state| { - if self.chain_spec().is_paris_active_at_block(block_state.number()) { - return Ok(Some(Vec::new())) - } - - Ok(block_state.block_ref().recovered_block().body().ommers().map(|o| o.to_vec())) - }, - ) - } -} - impl BlockBodyIndicesProvider for ConsistentProvider { fn block_body_indices( &self, @@ -1335,17 +1295,6 @@ impl BlockReaderIdExt for ConsistentProvider { BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } - } } impl StorageChangeSetReader for ConsistentProvider { @@ -1491,7 +1440,9 @@ mod tests { use alloy_primitives::B256; use itertools::Itertools; use rand::Rng; - use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, NewCanonicalChain}; + use reth_chain_state::{ + ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, + }; use reth_db_api::models::AccountBeforeTx; use reth_ethereum_primitives::Block; use reth_execution_types::ExecutionOutcome; @@ -1601,7 +1552,7 @@ mod tests { )), Default::default(), 
Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), )], }; consistent_provider.canonical_in_memory_state.update_chain(chain); @@ -1645,7 +1596,7 @@ mod tests { execution_output: Default::default(), hashed_state: Default::default(), }, - trie: Default::default(), + trie: ExecutedTrieUpdates::empty(), }); // Now the last block should be found in memory @@ -1711,7 +1662,7 @@ mod tests { )), Default::default(), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), )], }; consistent_provider.canonical_in_memory_state.update_chain(chain); @@ -1826,7 +1777,7 @@ mod tests { ..Default::default() }), Default::default(), - Default::default(), + ExecutedTrieUpdates::empty(), ) }) .unwrap()], diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 64e14e6dee4..1801c148ecb 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -5,10 +5,10 @@ use crate::{ BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + TransactionVariant, TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; -use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::ChainInfo; @@ -23,7 +23,7 @@ use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, NodePrimitivesProvider, OmmersProvider, StateCommitmentProvider, + BlockBodyIndicesProvider, NodePrimitivesProvider, StateCommitmentProvider, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; @@ -365,14 +365,10 @@ impl BlockReader for ProviderFactory { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders(&self) -> ProviderResult>> { - self.provider()?.pending_block_with_senders() - } - fn pending_block_and_receipts( &self, ) -> ProviderResult, Vec)>> { @@ -533,22 +529,6 @@ impl ReceiptProvider for ProviderFactory { } } -impl WithdrawalsProvider for ProviderFactory { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.provider()?.withdrawals_by_block(id, timestamp) - } -} - -impl OmmersProvider for ProviderFactory { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.provider()?.ommers(id) - } -} - impl BlockBodyIndicesProvider for ProviderFactory { fn block_body_indices( &self, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index a964eb6edfb..0d61db4f27c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -17,13 +17,12 @@ use crate::{ StageCheckpointReader, StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, 
StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, - WithdrawalsProvider, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta}, BlockHeader, Header, TxReceipt, }; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals, BlockHashOrNumber}; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, @@ -56,8 +55,8 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, OmmersProvider, - StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, + StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ @@ -1175,12 +1174,7 @@ impl BlockReader for DatabaseProvid Ok(None) } - - fn pending_block(&self) -> ProviderResult>> { - Ok(None) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } @@ -1602,62 +1596,6 @@ impl ReceiptProvider for DatabasePr } } -impl> WithdrawalsProvider - for DatabaseProvider -{ - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if self.chain_spec.is_shanghai_active_at_timestamp(timestamp) { - if let Some(number) = self.convert_hash_or_number(id)? { - return self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::BlockMeta, - number, - |static_file| static_file.withdrawals_by_block(number.into(), timestamp), - || { - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = self - .tx - .get::(number) - .map(|w| w.map(|w| w.withdrawals))? - .unwrap_or_default(); - Ok(Some(withdrawals)) - }, - ) - } - } - Ok(None) - } -} - -impl OmmersProvider for DatabaseProvider { - /// Returns the ommers for the block with matching id from the database. - /// - /// If the block is not found, this returns `None`. - /// If the block exists, but doesn't contain ommers, this returns `None`. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - if let Some(number) = self.convert_hash_or_number(id)? { - // If the Paris (Merge) hardfork block is known and block is after it, return empty - // ommers. - if self.chain_spec.is_paris_active_at_block(number) { - return Ok(Some(Vec::new())) - } - - return self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::BlockMeta, - number, - |static_file| static_file.ommers(id), - || Ok(self.tx.get::>(number)?.map(|o| o.ommers)), - ) - } - - Ok(None) - } -} - impl BlockBodyIndicesProvider for DatabaseProvider { @@ -2996,13 +2934,6 @@ impl BlockWrite // Increment transaction id for each transaction. 
next_tx_num += 1; } - - debug!( - target: "providers::db", - ?block_number, - actions = ?durations_recorder.actions, - "Inserted block body" - ); } self.storage.writer().write_block_bodies(self, bodies, write_to)?; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index e1b5e0e2196..365e54467ac 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -7,20 +7,20 @@ use crate::{ TransactionsProvider, }; use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals, BlockHashOrNumber}; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{ - BlockHashMask, BodyIndicesMask, HeaderMask, HeaderWithHashMask, OmmersMask, ReceiptMask, - StaticFileCursor, TDWithHashMask, TotalDifficultyMask, TransactionMask, WithdrawalsMask, + BlockHashMask, BodyIndicesMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, + TDWithHashMask, TotalDifficultyMask, TransactionMask, }; use reth_db_api::{ models::StoredBlockBodyIndices, table::{Decompress, Value}, }; -use reth_node_types::{FullNodePrimitives, NodePrimitives}; +use reth_node_types::NodePrimitives; use reth_primitives_traits::{SealedHeader, SignedTransaction}; -use reth_storage_api::{BlockBodyIndicesProvider, OmmersProvider, WithdrawalsProvider}; +use reth_storage_api::BlockBodyIndicesProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -361,36 +361,6 @@ impl WithdrawalsProvider for StaticFileJarProvider<'_, N> { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - _: u64, - ) -> ProviderResult> { - if let Some(num) = id.as_number() { - return Ok(self - .cursor()? - .get_one::(num.into())? - .and_then(|s| s.withdrawals)) - } - // Only accepts block number queries - Err(ProviderError::UnsupportedProvider) - } -} - -impl> OmmersProvider for StaticFileJarProvider<'_, N> { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - if let Some(num) = id.as_number() { - return Ok(self - .cursor()? - .get_one::>(num.into())? 
- .map(|s| s.ommers)) - } - // Only accepts block number queries - Err(ProviderError::UnsupportedProvider) - } -} - impl BlockBodyIndicesProvider for StaticFileJarProvider<'_, N> { fn block_body_indices(&self, num: u64) -> ProviderResult> { self.cursor()?.get_one::(num.into()) diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 2e3420b0c02..a3ab41325f8 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -5,13 +5,13 @@ use super::{ use crate::{ to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, WithdrawalsProvider, + TransactionsProviderExt, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta}, Header, }; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals, BlockHashOrNumber}; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{ b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, }; @@ -42,7 +42,7 @@ use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, }; -use reth_storage_api::{BlockBodyIndicesProvider, DBProvider, OmmersProvider}; +use reth_storage_api::{BlockBodyIndicesProvider, DBProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, @@ -1647,12 +1647,7 @@ impl> Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult>> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1702,48 +1697,6 @@ impl> } } -impl WithdrawalsProvider for StaticFileProvider { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if let Some(num) = id.as_number() { - return self - .get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) - .and_then(|provider| provider.withdrawals_by_block(id, timestamp)) - .or_else(|err| { - if let ProviderError::MissingStaticFileBlock(_, _) = err { - Ok(None) - } else { - Err(err) - } - }) - } - // Only accepts block number queries - Err(ProviderError::UnsupportedProvider) - } -} - -impl> OmmersProvider for StaticFileProvider { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - if let Some(num) = id.as_number() { - return self - .get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) - .and_then(|provider| provider.ommers(id)) - .or_else(|err| { - if let ProviderError::MissingStaticFileBlock(_, _) = err { - Ok(None) - } else { - Err(err) - } - }) - } - // Only accepts block number queries - Err(ProviderError::UnsupportedProvider) - } -} - impl BlockBodyIndicesProvider for StaticFileProvider { fn block_body_indices(&self, num: u64) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3781eff6621..3fd2828faad 100644 
--- a/crates/storage/provider/src/providers/static_file/writer.rs
+++ b/crates/storage/provider/src/providers/static_file/writer.rs
@@ -359,18 +359,22 @@ impl StaticFileProviderRW {
         Ok(())
     }
 
-    /// Verifies if the incoming block number matches the next expected block number
-    /// for a static file. This ensures data continuity when adding new blocks.
-    fn check_next_block_number(&self, expected_block_number: u64) -> ProviderResult<()> {
+    /// Returns the block number one past the current tip of the static file.
+    pub fn next_block_number(&self) -> u64 {
         // The next static file block number can be found by checking the one after block_end.
-        // However if it's a new file that hasn't been added any data, its block range will actually
-        // be None. In that case, the next block will be found on `expected_block_start`.
-        let next_static_file_block = self
-            .writer
+        // However, if it's a new file that hasn't had any data added yet, its block range will
+        // actually be None. In that case, the next block will be found on `expected_block_start`.
+        self.writer
             .user_header()
             .block_end()
             .map(|b| b + 1)
-            .unwrap_or_else(|| self.writer.user_header().expected_block_start());
+            .unwrap_or_else(|| self.writer.user_header().expected_block_start())
+    }
+
+    /// Verifies if the incoming block number matches the next expected block number
+    /// for a static file. This ensures data continuity when adding new blocks.
+    fn check_next_block_number(&self, expected_block_number: u64) -> ProviderResult<()> {
+        let next_static_file_block = self.next_block_number();
 
         if expected_block_number != next_static_file_block {
             return Err(ProviderError::UnexpectedStaticFileBlockNumber(
diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs
index 28d87586083..6480e4d9253 100644
--- a/crates/storage/provider/src/test_utils/mock.rs
+++ b/crates/storage/provider/src/test_utils/mock.rs
@@ -3,10 +3,10 @@ use crate::{
     AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt,
     ChainSpecProvider, ChangeSetReader, EthStorage, HeaderProvider, ReceiptProviderIdExt,
     StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider,
-    TransactionVariant, TransactionsProvider, WithdrawalsProvider,
+    TransactionVariant, TransactionsProvider,
 };
 use alloy_consensus::{constants::EMPTY_ROOT_HASH, transaction::TransactionMeta, Header};
-use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumberOrTag};
+use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
 use alloy_primitives::{
     keccak256, map::HashMap, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue,
     TxHash, TxNumber, B256, U256,
@@ -30,8 +30,8 @@ use reth_prune_types::PruneModes;
 use reth_stages_types::{StageCheckpoint, StageId};
 use reth_storage_api::{
     BlockBodyIndicesProvider, DBProvider, DatabaseProviderFactory, HashedPostStateProvider,
-    NodePrimitivesProvider, OmmersProvider, StageCheckpointReader, StateCommitmentProvider,
-    StateProofProvider, StorageRootProvider,
+    NodePrimitivesProvider, StageCheckpointReader, StateCommitmentProvider, StateProofProvider,
+    StorageRootProvider,
 };
 use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult};
 use reth_trie::{
@@ -57,6 +57,8 @@ pub struct MockEthProvider<
     pub blocks: Arc>>,
     /// Local header store
     pub headers: Arc>>,
+    /// Local receipt store indexed by block number
+    pub receipts: Arc>>>,
     /// Local account store
     pub accounts: Arc>>,
     /// Local chain spec
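
The `writer.rs` hunk above is a small extract-method refactor: the next expected block number was previously computed inline by the private `check_next_block_number`, and is now exposed through a public `next_block_number` accessor that the check reuses, so callers can query the tip without attempting a write. A minimal, self-contained sketch of the resulting shape, using simplified stand-ins (`SegmentHeader`, `Writer`, and the string error here are illustrative, not reth's actual types):

```rust
/// Simplified stand-in for the static file segment header.
struct SegmentHeader {
    /// Inclusive end of the block range written so far, `None` for an empty file.
    block_end: Option<u64>,
    /// First block an (initially empty) file is expected to hold.
    expected_block_start: u64,
}

struct Writer {
    header: SegmentHeader,
}

impl Writer {
    /// Public accessor: the block number one past the current tip.
    fn next_block_number(&self) -> u64 {
        // One past the written tip, or the expected start for an empty file.
        self.header.block_end.map(|b| b + 1).unwrap_or(self.header.expected_block_start)
    }

    /// The validator reuses the accessor instead of duplicating the logic.
    fn check_next_block_number(&self, expected: u64) -> Result<(), String> {
        let next = self.next_block_number();
        if expected != next {
            return Err(format!("unexpected block {expected}, next is {next}"));
        }
        Ok(())
    }
}

fn main() {
    // A file holding blocks 0..=499_999 expects 500_000 next.
    let w = Writer { header: SegmentHeader { block_end: Some(499_999), expected_block_start: 0 } };
    assert_eq!(w.next_block_number(), 500_000);
    assert!(w.check_next_block_number(500_000).is_ok());
    assert!(w.check_next_block_number(1).is_err());

    // An empty file falls back to its expected start.
    let empty = Writer { header: SegmentHeader { block_end: None, expected_block_start: 500_000 } };
    assert_eq!(empty.next_block_number(), 500_000);
}
```
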
@@ -75,6 +77,7 @@ where Self { blocks: self.blocks.clone(), headers: self.headers.clone(), + receipts: self.receipts.clone(), accounts: self.accounts.clone(), chain_spec: self.chain_spec.clone(), state_roots: self.state_roots.clone(), @@ -90,6 +93,7 @@ impl MockEthProvider { Self { blocks: Default::default(), headers: Default::default(), + receipts: Default::default(), accounts: Default::default(), chain_spec: Arc::new(reth_chainspec::ChainSpecBuilder::mainnet().build()), state_roots: Default::default(), @@ -141,6 +145,18 @@ impl MockEthProvider) { + self.receipts.lock().insert(block_number, receipts); + } + + /// Add multiple receipts to local receipt store + pub fn extend_receipts(&self, iter: impl IntoIterator)>) { + for (block_number, receipts) in iter { + self.add_receipts(block_number, receipts); + } + } + /// Add state root to local state root store pub fn add_state_root(&self, state_root: B256) { self.state_roots.lock().push(state_root); @@ -154,6 +170,7 @@ impl MockEthProvider ProviderResult>> { - Ok(None) + let receipts_lock = self.receipts.lock(); + + match block { + BlockHashOrNumber::Hash(hash) => { + // Find block number by hash first + let headers_lock = self.headers.lock(); + if let Some(header) = headers_lock.get(&hash) { + Ok(receipts_lock.get(&header.number).cloned()) + } else { + Ok(None) + } + } + BlockHashOrNumber::Number(number) => Ok(receipts_lock.get(&number).cloned()), + } } fn receipts_by_tx_range( @@ -515,9 +545,25 @@ where fn receipts_by_block_range( &self, - _block_range: RangeInclusive, + block_range: RangeInclusive, ) -> ProviderResult>> { - Ok(vec![]) + let receipts_lock = self.receipts.lock(); + let headers_lock = self.headers.lock(); + + let mut result = Vec::new(); + for block_number in block_range { + // Only include blocks that exist in headers (i.e., have been added to the provider) + if headers_lock.values().any(|header| header.number == block_number) { + if let Some(block_receipts) = receipts_lock.get(&block_number) { + result.push(block_receipts.clone()); + } else { + // If block exists but no receipts found, add empty vec + result.push(vec![]); + } + } + } + + Ok(result) } } @@ -623,11 +669,7 @@ impl BlockReader } } - fn pending_block(&self) -> ProviderResult>> { - Ok(None) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } @@ -700,13 +742,6 @@ where Some(block) => Ok(Some(block.header)), } } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => self.ommers(BlockHashOrNumber::Hash(hash.block_hash)), - } - } } impl AccountReader for MockEthProvider { @@ -909,29 +944,6 @@ impl StatePr } } -impl WithdrawalsProvider - for MockEthProvider -{ - fn withdrawals_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - -impl OmmersProvider for MockEthProvider -where - T: NodePrimitives, - ChainSpec: Send + Sync, - Self: HeaderProvider, -{ - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { - Ok(None) - } -} - impl BlockBodyIndicesProvider for MockEthProvider { @@ -976,3 +988,74 @@ impl NodePrimitivesProvider { type Primitives = T; } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::BlockHash; + use reth_ethereum_primitives::Receipt; + + #[test] + fn test_mock_provider_receipts() { + let provider = MockEthProvider::new(); + + let block_hash = BlockHash::random(); + 
let block_number = 1u64; + let header = Header { number: block_number, ..Default::default() }; + + let receipt1 = Receipt { cumulative_gas_used: 21000, success: true, ..Default::default() }; + let receipt2 = Receipt { cumulative_gas_used: 42000, success: true, ..Default::default() }; + let receipts = vec![receipt1, receipt2]; + + provider.add_header(block_hash, header); + provider.add_receipts(block_number, receipts.clone()); + + let result = provider.receipts_by_block(block_hash.into()).unwrap(); + assert_eq!(result, Some(receipts.clone())); + + let result = provider.receipts_by_block(block_number.into()).unwrap(); + assert_eq!(result, Some(receipts.clone())); + + let range_result = provider.receipts_by_block_range(1..=1).unwrap(); + assert_eq!(range_result, vec![receipts]); + + let non_existent = provider.receipts_by_block(BlockHash::random().into()).unwrap(); + assert_eq!(non_existent, None); + + let empty_range = provider.receipts_by_block_range(10..=20).unwrap(); + assert_eq!(empty_range, Vec::>::new()); + } + + #[test] + fn test_mock_provider_receipts_multiple_blocks() { + let provider = MockEthProvider::new(); + + let block1_hash = BlockHash::random(); + let block2_hash = BlockHash::random(); + let block1_number = 1u64; + let block2_number = 2u64; + + let header1 = Header { number: block1_number, ..Default::default() }; + let header2 = Header { number: block2_number, ..Default::default() }; + + let receipts1 = + vec![Receipt { cumulative_gas_used: 21000, success: true, ..Default::default() }]; + let receipts2 = + vec![Receipt { cumulative_gas_used: 42000, success: true, ..Default::default() }]; + + provider.add_header(block1_hash, header1); + provider.add_header(block2_hash, header2); + provider.add_receipts(block1_number, receipts1.clone()); + provider.add_receipts(block2_number, receipts2.clone()); + + let range_result = provider.receipts_by_block_range(1..=2).unwrap(); + assert_eq!(range_result.len(), 2); + assert_eq!(range_result[0], receipts1); + assert_eq!(range_result[1], receipts2); + + let partial_range = provider.receipts_by_block_range(1..=1).unwrap(); + assert_eq!(partial_range.len(), 1); + assert_eq!(partial_range[0], receipts1); + } +} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 9ae2d7ad27d..9494c865297 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -6,7 +6,7 @@ use crate::{ use alloy_consensus::BlockHeader; use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; use reth_db_api::transaction::{DbTx, DbTxMut}; -use reth_errors::ProviderResult; +use reth_errors::{ProviderError, ProviderResult}; use reth_primitives_traits::{NodePrimitives, SignedTransaction}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; @@ -165,6 +165,7 @@ where trie, } in blocks { + let block_hash = recovered_block.hash(); self.database() .insert_block(Arc::unwrap_or_clone(recovered_block), StorageLocation::Both)?; @@ -179,7 +180,9 @@ where // insert hashes and intermediate merkle nodes self.database() .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?; - self.database().write_trie_updates(&trie)?; + self.database().write_trie_updates( + trie.as_ref().ok_or(ProviderError::MissingTrieUpdates(block_hash))?, + )?; } // update history indices diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 1c43004216a..7dfb5486f90 100644 --- 
a/crates/storage/storage-api/Cargo.toml
+++ b/crates/storage/storage-api/Cargo.toml
@@ -57,3 +57,28 @@ db-api = [
     "dep:reth-db-api",
     "dep:reth-trie-db",
 ]
+
+serde = [
+    "reth-ethereum-primitives/serde",
+    "reth-db-models/serde",
+    "reth-execution-types/serde",
+    "reth-primitives-traits/serde",
+    "reth-prune-types/serde",
+    "reth-stages-types/serde",
+    "reth-trie-common/serde",
+    "reth-trie-db?/serde",
+    "revm-database/serde",
+    "alloy-eips/serde",
+    "alloy-primitives/serde",
+    "alloy-consensus/serde",
+    "alloy-rpc-types-engine/serde",
+]
+
+serde-bincode-compat = [
+    "reth-ethereum-primitives/serde-bincode-compat",
+    "reth-execution-types/serde-bincode-compat",
+    "reth-primitives-traits/serde-bincode-compat",
+    "reth-trie-common/serde-bincode-compat",
+    "alloy-eips/serde-bincode-compat",
+    "alloy-consensus/serde-bincode-compat",
+]
diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs
index ce488aba887..4316e5af673 100644
--- a/crates/storage/storage-api/src/block.rs
+++ b/crates/storage/storage-api/src/block.rs
@@ -1,6 +1,6 @@
 use crate::{
-    BlockBodyIndicesProvider, BlockNumReader, HeaderProvider, OmmersProvider, ReceiptProvider,
-    ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider,
+    BlockBodyIndicesProvider, BlockNumReader, HeaderProvider, ReceiptProvider,
+    ReceiptProviderIdExt, TransactionVariant, TransactionsProvider,
 };
 use alloc::{sync::Arc, vec::Vec};
 use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
@@ -53,8 +53,6 @@ pub trait BlockReader:
     + BlockBodyIndicesProvider
     + TransactionsProvider
     + ReceiptProvider
-    + WithdrawalsProvider
-    + OmmersProvider
     + Send
     + Sync
 {
@@ -80,17 +78,11 @@ pub trait BlockReader:
     /// Returns `None` if block is not found.
     fn block(&self, id: BlockHashOrNumber) -> ProviderResult>;
 
-    /// Returns the pending block if available
-    ///
-    /// Note: This returns a [`SealedBlock`] because it's expected that this is sealed by the
-    /// provider and the caller does not know the hash.
-    fn pending_block(&self) -> ProviderResult>>;
-
     /// Returns the pending block if available
     ///
     /// Note: This returns a [`RecoveredBlock`] because it's expected that this is sealed by
     /// the provider and the caller does not know the hash.
-    fn pending_block_with_senders(&self) -> ProviderResult>>;
+    fn pending_block(&self) -> ProviderResult>>;
 
     /// Returns the pending block and receipts if available.
     #[expect(clippy::type_complexity)]
@@ -167,12 +159,9 @@ impl BlockReader for Arc {
     fn block(&self, id: BlockHashOrNumber) -> ProviderResult> {
         T::block(self, id)
     }
-    fn pending_block(&self) -> ProviderResult>> {
+    fn pending_block(&self) -> ProviderResult>> {
         T::pending_block(self)
     }
-    fn pending_block_with_senders(&self) -> ProviderResult>> {
-        T::pending_block_with_senders(self)
-    }
     fn pending_block_and_receipts(
         &self,
     ) -> ProviderResult, Vec)>> {
@@ -228,12 +217,9 @@ impl BlockReader for &T {
     fn block(&self, id: BlockHashOrNumber) -> ProviderResult> {
         T::block(self, id)
     }
-    fn pending_block(&self) -> ProviderResult>> {
+    fn pending_block(&self) -> ProviderResult>> {
         T::pending_block(self)
     }
-    fn pending_block_with_senders(&self) -> ProviderResult>> {
-        T::pending_block_with_senders(self)
-    }
     fn pending_block_and_receipts(
         &self,
     ) -> ProviderResult, Vec)>> {
@@ -384,19 +370,6 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt {
     ///
     /// Returns `None` if header is not found.
fn header_by_id(&self, id: BlockId) -> ProviderResult>; - - /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult>> { - self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) - } - - /// Returns the ommers with the matching `BlockId` from the database. - /// - /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } /// Functionality to read the last known chain blocks from the database. diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index dd70d801eea..ffc0c8cb3c9 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -1,4 +1,4 @@ -use crate::{DBProvider, OmmersProvider, StorageLocation}; +use crate::{DBProvider, StorageLocation}; use alloc::vec::Vec; use alloy_consensus::Header; use alloy_primitives::BlockNumber; @@ -146,9 +146,7 @@ where impl BlockBodyReader for EthStorage where - Provider: DBProvider - + ChainSpecProvider - + OmmersProvider
, + Provider: DBProvider + ChainSpecProvider, T: SignedTransaction, H: FullBlockHeader, { @@ -181,9 +179,14 @@ where let ommers = if chain_spec.is_paris_active_at_block(header.number()) { Vec::new() } else { - provider.ommers(header.number().into())?.unwrap_or_default() + // Pre-merge: fetch ommers from database using direct database access + provider + .tx_ref() + .cursor_read::>()? + .seek_exact(header.number())? + .map(|(_, stored_ommers)| stored_ommers.ommers) + .unwrap_or_default() }; - bodies.push(alloy_consensus::BlockBody { transactions, ommers, withdrawals }); } diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index c6505c5ae1f..a82f6092494 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -57,12 +57,6 @@ pub use trie::*; mod chain_info; pub use chain_info::*; -mod withdrawals; -pub use withdrawals::*; - -mod ommers; -pub use ommers::*; - #[cfg(feature = "db-api")] mod database_provider; #[cfg(feature = "db-api")] diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 6102ce34a78..eca0beb0f7b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -3,14 +3,14 @@ use crate::{ AccountReader, BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, HashedPostStateProvider, - HeaderProvider, NodePrimitivesProvider, OmmersProvider, PruneCheckpointReader, ReceiptProvider, + HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + TransactionVariant, TransactionsProvider, }; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; -use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; @@ -146,10 +146,6 @@ impl BlockReaderIdExt for NoopProvider fn header_by_id(&self, _id: BlockId) -> ProviderResult> { Ok(None) } - - fn ommers_by_id(&self, _id: BlockId) -> ProviderResult>> { - Ok(None) - } } impl BlockReader for NoopProvider { @@ -167,11 +163,7 @@ impl BlockReader for NoopProvider { Ok(None) } - fn pending_block(&self) -> ProviderResult>> { - Ok(None) - } - - fn pending_block_with_senders(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } @@ -535,22 +527,6 @@ impl StageCheckpointReader for NoopProvider WithdrawalsProvider for NoopProvider { - fn withdrawals_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - -impl OmmersProvider for NoopProvider { - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { - Ok(None) - } -} - impl PruneCheckpointReader for NoopProvider { fn get_prune_checkpoint( &self, diff --git a/crates/storage/storage-api/src/ommers.rs b/crates/storage/storage-api/src/ommers.rs deleted file mode 100644 index c3f68b4f96e..00000000000 --- a/crates/storage/storage-api/src/ommers.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::HeaderProvider; -use alloc::{sync::Arc, vec::Vec}; -use alloy_eips::BlockHashOrNumber; 
-use reth_storage_errors::provider::ProviderResult; - -/// Client trait for fetching ommers. -pub trait OmmersProvider: HeaderProvider + Send + Sync { - /// Returns the ommers/uncle headers of the given block from the database. - /// - /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; -} - -impl OmmersProvider for Arc { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - T::ommers(self, id) - } -} - -impl OmmersProvider for &T { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - T::ommers(self, id) - } -} diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs deleted file mode 100644 index fdfb27aa707..00000000000 --- a/crates/storage/storage-api/src/withdrawals.rs +++ /dev/null @@ -1,13 +0,0 @@ -use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber}; -use reth_storage_errors::provider::ProviderResult; - -/// Client trait for fetching [`alloy_eips::eip4895::Withdrawal`] related data. -#[auto_impl::auto_impl(&, Arc)] -pub trait WithdrawalsProvider: Send + Sync { - /// Get withdrawals by block id. - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult>; -} diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index d232607bb22..3e826f34707 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -89,6 +89,8 @@ serde = [ "revm-primitives/serde", "reth-primitives-traits/serde", "reth-ethereum-primitives/serde", + "reth-chain-state/serde", + "reth-storage-api/serde", ] test-utils = [ "rand", diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 03e854d8ff2..e723dc0dc79 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -311,7 +311,8 @@ impl InvalidPoolTransactionError { InvalidTransactionError::ChainIdMismatch | InvalidTransactionError::GasUintOverflow | InvalidTransactionError::TxTypeNotSupported | - InvalidTransactionError::SignerAccountHasBytecode => true, + InvalidTransactionError::SignerAccountHasBytecode | + InvalidTransactionError::GasLimitTooHigh => true, } } Self::ExceedsGasLimit(_, _) => true, diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 2abbe0b5896..17320ecf930 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -43,7 +43,7 @@ impl SenderIdentifiers { &mut self, addrs: impl IntoIterator, ) -> Vec { - addrs.into_iter().filter_map(|addr| self.sender_id(&addr)).collect() + addrs.into_iter().map(|addr| self.sender_id_or_create(addr)).collect() } /// Returns the current identifier and increments the counter. 
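
The one-line change to `SenderIdentifiers::sender_ids` just above is a behavior fix rather than a cleanup: `filter_map` over `sender_id` silently dropped any address that had no id assigned yet, so the returned list could be shorter than the input, while `sender_id_or_create` assigns a fresh id on demand and keeps the output aligned one-to-one with the input (the new `test_auths_stored_in_identifiers` test further down exercises this for EIP-7702 authorities). A minimal sketch of the difference, assuming a simplified id map rather than reth's actual `SenderIdentifiers`:

```rust
use std::collections::HashMap;

/// Simplified stand-in for the pool's sender id map; reth's real type maps
/// `Address -> SenderId`, this sketch uses strings and plain integers.
#[derive(Default)]
struct Ids {
    by_addr: HashMap<String, u64>,
    next_id: u64,
}

impl Ids {
    /// Lookup only: returns `None` for an address that was never registered.
    fn sender_id(&self, addr: &str) -> Option<u64> {
        self.by_addr.get(addr).copied()
    }

    /// Lookup or assign: always returns an id, creating a fresh one if needed.
    fn sender_id_or_create(&mut self, addr: String) -> u64 {
        if let Some(id) = self.by_addr.get(&addr) {
            return *id;
        }
        let id = self.next_id;
        self.next_id += 1;
        self.by_addr.insert(addr, id);
        id
    }

    /// The fixed method: every input address yields exactly one output id.
    fn sender_ids(&mut self, addrs: impl IntoIterator<Item = String>) -> Vec<u64> {
        // Old behavior: `.filter_map(|a| self.sender_id(&a))` skipped unknown
        // addresses, so the result could be shorter than the input.
        addrs.into_iter().map(|a| self.sender_id_or_create(a)).collect()
    }
}

fn main() {
    let mut ids = Ids::default();
    let out = ids.sender_ids(["0xaa".to_string(), "0xbb".to_string(), "0xaa".to_string()]);
    // Unknown addresses get fresh ids; a repeated address reuses its id.
    assert_eq!(out, vec![0, 1, 0]);
    assert_eq!(ids.sender_id("0xaa"), Some(0));
    assert_eq!(ids.sender_id("0xcc"), None);
}
```
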
diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index bf9a8fadf4f..1a72585fc80 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -206,9 +206,9 @@ mod traits; pub mod test_utils; /// Type alias for default ethereum transaction pool -pub type EthTransactionPool = Pool< - TransactionValidationTaskExecutor>, - CoinbaseTipOrdering, +pub type EthTransactionPool = Pool< + TransactionValidationTaskExecutor>, + CoinbaseTipOrdering, S, >; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index c18ca94f2ad..20b4b076e97 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -420,8 +420,12 @@ where self.pool.write().update_accounts(changed_senders); let mut listener = self.event_listener.write(); - promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } // This deletes outdated blob txs from the blob store, based on the account's nonce. This is // called during txpool maintenance when the pool drifted. @@ -570,7 +574,9 @@ where { let mut listener = self.event_listener.write(); - discarded_hashes.iter().for_each(|hash| listener.discarded(hash)); + for hash in &discarded_hashes { + listener.discarded(hash); + } } // A newly added transaction may be immediately discarded, so we need to @@ -665,9 +671,15 @@ where // broadcast specific transaction events let mut listener = self.event_listener.write(); - mined.iter().for_each(|tx| listener.mined(tx, block_hash)); - promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in &mined { + listener.mined(tx, block_hash); + } + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } } /// Fire events for the newly added transaction if there are any. @@ -679,8 +691,12 @@ where let AddedPendingTransaction { transaction, promoted, discarded, replaced } = tx; listener.pending(transaction.hash(), replaced.clone()); - promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in promoted { + listener.pending(tx.hash(), None); + } + for tx in discarded { + listener.discarded(tx.hash()); + } } AddedTransaction::Parked { transaction, replaced, .. 
} => { listener.queued(transaction.hash()); @@ -748,7 +764,9 @@ where let mut listener = self.event_listener.write(); - removed.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in &removed { + listener.discarded(tx.hash()); + } removed } @@ -766,7 +784,9 @@ where let mut listener = self.event_listener.write(); - removed.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in &removed { + listener.discarded(tx.hash()); + } removed } @@ -781,7 +801,9 @@ where let mut listener = self.event_listener.write(); - removed.iter().for_each(|tx| listener.discarded(tx.hash())); + for tx in &removed { + listener.discarded(tx.hash()); + } removed } @@ -1194,11 +1216,13 @@ impl OnNewCanonicalStateOutcome { mod tests { use crate::{ blobstore::{BlobStore, InMemoryBlobStore}, + identifier::SenderId, test_utils::{MockTransaction, TestPoolBuilder}, validate::ValidTransaction, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionValidationOutcome, U256, }; use alloy_eips::{eip4844::BlobTransactionSidecar, eip7594::BlobTransactionSidecarVariant}; + use alloy_primitives::Address; use std::{fs, path::PathBuf}; #[test] @@ -1285,4 +1309,28 @@ mod tests { // Assert that the pool's blob store matches the expected blob store. assert_eq!(*test_pool.blob_store(), blob_store); } + + #[test] + fn test_auths_stored_in_identifiers() { + // Create a test pool with default configuration. + let test_pool = &TestPoolBuilder::default().with_config(Default::default()).pool; + + let auth = Address::new([1; 20]); + let tx = MockTransaction::eip7702(); + + test_pool.add_transactions( + TransactionOrigin::Local, + [TransactionValidationOutcome::Valid { + balance: U256::from(1_000), + state_nonce: 0, + bytecode_hash: None, + transaction: ValidTransaction::Valid(tx), + propagate: true, + authorities: Some(vec![auth]), + }], + ); + + let identifiers = test_pool.identifiers.read(); + assert_eq!(identifiers.sender_id(&auth), Some(SenderId::from(1))); + } } diff --git a/crates/transaction-pool/src/test_utils/pool.rs b/crates/transaction-pool/src/test_utils/pool.rs index 7e22f3b8863..6af440f086a 100644 --- a/crates/transaction-pool/src/test_utils/pool.rs +++ b/crates/transaction-pool/src/test_utils/pool.rs @@ -66,7 +66,7 @@ pub(crate) struct MockTransactionSimulator { balances: HashMap, /// represents the on chain nonce of a sender. nonces: HashMap, - /// A set of addresses to as senders. + /// A set of addresses to use as senders. senders: Vec
, /// What scenarios to execute. scenarios: Vec, @@ -166,7 +166,7 @@ impl MockSimulatorConfig { } } -/// Represents +/// Represents the different types of test scenarios. #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub(crate) enum ScenarioType { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 68d3e87499d..c702befbc93 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -25,7 +25,8 @@ use alloy_eips::{ }; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_primitives_traits::{ - transaction::error::InvalidTransactionError, Block, GotExpected, SealedBlock, + constants::MAX_TX_GAS_LIMIT_OSAKA, transaction::error::InvalidTransactionError, Block, + GotExpected, SealedBlock, }; use reth_storage_api::{StateProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -461,6 +462,16 @@ where } } + // Osaka validation of max tx gas. + if self.fork_tracker.is_osaka_activated() && + transaction.gas_limit() > MAX_TX_GAS_LIMIT_OSAKA + { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::GasLimitTooHigh.into(), + )) + } + Ok(transaction) } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 4c22c18247f..ff6c5a58539 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -84,10 +84,7 @@ std = [ "revm-database/std", "revm-state/std", ] -eip1186 = [ - "alloy-rpc-types-eth/serde", - "dep:alloy-serde", -] +eip1186 = ["alloy-rpc-types-eth/serde", "dep:alloy-serde"] serde = [ "dep:serde", "bytes?/serde", @@ -101,10 +98,7 @@ serde = [ "revm-database/serde", "revm-state/serde", ] -reth-codec = [ - "dep:reth-codecs", - "dep:bytes", -] +reth-codec = ["dep:reth-codecs", "dep:bytes"] serde-bincode-compat = [ "serde", "reth-primitives-traits/serde-bincode-compat", diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 1875a132dca..b6f60e2b2a1 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -22,6 +22,7 @@ use revm_database::{AccountStatus, BundleAccount}; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct HashedPostState { /// Mapping of hashed address to account info, `None` if destroyed. pub accounts: B256Map>, @@ -337,6 +338,7 @@ impl HashedPostState { /// Representation of in-memory hashed storage. #[derive(PartialEq, Eq, Clone, Debug, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct HashedStorage { /// Flag indicating whether the storage was wiped or not. pub wiped: bool, diff --git a/crates/trie/common/src/input.rs b/crates/trie/common/src/input.rs index db15a61458d..ecf9bab7eca 100644 --- a/crates/trie/common/src/input.rs +++ b/crates/trie/common/src/input.rs @@ -31,6 +31,43 @@ impl TrieInput { Self { nodes: TrieUpdates::default(), state, prefix_sets } } + /// Create new trie input from the provided blocks, from oldest to newest. See the documentation + /// for [`Self::extend_with_blocks`] for details. + pub fn from_blocks<'a>( + blocks: impl IntoIterator)>, + ) -> Self { + let mut input = Self::default(); + input.extend_with_blocks(blocks); + input + } + + /// Extend the trie input with the provided blocks, from oldest to newest. 
+ /// + /// For blocks with missing trie updates, the trie input will be extended with prefix sets + /// constructed from the state of this block and the state itself, **without** trie updates. + pub fn extend_with_blocks<'a>( + &mut self, + blocks: impl IntoIterator)>, + ) { + for (hashed_state, trie_updates) in blocks { + if let Some(nodes) = trie_updates.as_ref() { + self.append_cached_ref(nodes, hashed_state); + } else { + self.append_ref(hashed_state); + } + } + } + + /// Prepend another trie input to the current one. + pub fn prepend_self(&mut self, mut other: Self) { + core::mem::swap(&mut self.nodes, &mut other.nodes); + self.nodes.extend(other.nodes); + core::mem::swap(&mut self.state, &mut other.state); + self.state.extend(other.state); + // No need to swap prefix sets, as they will be sorted and deduplicated. + self.prefix_sets.extend(other.prefix_sets); + } + /// Prepend state to the input and extend the prefix sets. pub fn prepend(&mut self, mut state: HashedPostState) { self.prefix_sets.extend(state.construct_prefix_sets()); diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 2620cd5154d..844f3de1b62 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -15,6 +15,13 @@ pub struct TriePrefixSetsMut { } impl TriePrefixSetsMut { + /// Returns `true` if all prefix sets are empty. + pub fn is_empty(&self) -> bool { + self.account_prefix_set.is_empty() && + self.storage_prefix_sets.is_empty() && + self.destroyed_accounts.is_empty() + } + /// Extends prefix sets with contents of another prefix set. pub fn extend(&mut self, other: Self) { self.account_prefix_set.extend(other.account_prefix_set); diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index fc38b653d8c..e6f5463b7df 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -13,7 +13,7 @@ use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::{StateRootError, StorageRootError}; -use tracing::trace; +use tracing::{trace, trace_span}; #[cfg(feature = "metrics")] use crate::metrics::{StateRootMetrics, TrieRootMetrics}; @@ -396,7 +396,10 @@ where self, retain_updates: bool, ) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { - trace!(target: "trie::storage_root", hashed_address = ?self.hashed_address, "calculating storage root"); + let span = trace_span!(target: "trie::storage_root", "Storage trie", hashed_address = ?self.hashed_address); + let _enter = span.enter(); + + trace!(target: "trie::storage_root", "calculating storage root"); let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 5e10225018f..73311616fd1 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -1,24 +1,26 @@ -name: 'reth' +name: reth services: reth: restart: unless-stopped image: ghcr.io/paradigmxyz/reth ports: - - '9001:9001' # metrics - - '30303:30303' # eth/66 peering - - '8545:8545' # rpc - - '8551:8551' # engine + - "9001:9001" # metrics + - "30303:30303" # eth/66 peering + - "8545:8545" # rpc + - "8551:8551" # engine volumes: - mainnet_data:/root/.local/share/reth/mainnet - sepolia_data:/root/.local/share/reth/sepolia - holesky_data:/root/.local/share/reth/holesky + - hoodi_data:/root/.local/share/reth/hoodi - logs:/root/logs - ./jwttoken:/root/jwt/:ro # 
https://paradigmxyz.github.io/reth/run/troubleshooting.html#concurrent-database-access-error-using-containersdocker pid: host # For Sepolia, replace `--chain mainnet` with `--chain sepolia` # For Holesky, replace `--chain mainnet` with `--chain holesky` + # For Hoodi, replace `--chain mainnet` with `--chain hoodi` command: > node --chain mainnet @@ -39,7 +41,7 @@ services: - 9090:9090 volumes: - ./prometheus/:/etc/prometheus/ - - prometheusdata:/prometheus + - prometheus_data:/prometheus command: - --config.file=/etc/prometheus/prometheus.yml - --storage.tsdb.path=/prometheus @@ -55,7 +57,7 @@ services: environment: PROMETHEUS_URL: ${PROMETHEUS_URL:-http://prometheus:9090} volumes: - - grafanadata:/var/lib/grafana + - grafana_data:/var/lib/grafana - ./grafana/datasources:/etc/grafana/provisioning/datasources - ./grafana/dashboards:/etc/grafana/provisioning_temp/dashboards # 1. Copy dashboards from temp directory to prevent modifying original host files @@ -73,9 +75,11 @@ volumes: driver: local holesky_data: driver: local + hoodi_data: + driver: local logs: driver: local - prometheusdata: + prometheus_data: driver: local - grafanadata: + grafana_data: driver: local diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 1f2ebe36943..edfabec7f17 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -39,7 +39,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "11.5.3" + "version": "11.5.1" }, { "type": "panel", @@ -145,7 +145,7 @@ }, "gridPos": { "h": 3, - "w": 3, + "w": 4, "x": 0, "y": 1 }, @@ -157,9 +157,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -170,16 +168,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{version}}", "range": false, @@ -215,8 +213,8 @@ }, "gridPos": { "h": 3, - "w": 6, - "x": 3, + "w": 4, + "x": 4, "y": 1 }, "id": 192, @@ -227,9 +225,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -240,16 +236,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{build_timestamp}}", "range": false, @@ -285,8 +281,8 @@ }, "gridPos": { "h": 3, - "w": 3, - "x": 9, + "w": 4, + "x": 8, "y": 1 }, "id": 193, @@ -297,9 +293,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -310,16 +304,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": 
"reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{git_sha}}", "range": false, @@ -355,7 +349,7 @@ }, "gridPos": { "h": 3, - "w": 2, + "w": 4, "x": 12, "y": 1 }, @@ -367,9 +361,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -380,16 +372,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{build_profile}}", "range": false, @@ -425,8 +417,8 @@ }, "gridPos": { "h": 3, - "w": 5, - "x": 14, + "w": 4, + "x": 16, "y": 1 }, "id": 196, @@ -437,9 +429,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -450,16 +440,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{target_triple}}", "range": false, @@ -495,8 +485,8 @@ }, "gridPos": { "h": 3, - "w": 5, - "x": 19, + "w": 4, + "x": 20, "y": 1 }, "id": 197, @@ -507,9 +497,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -520,16 +508,16 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", + "expr": "reth_info{job=\"$job\"}", "instant": true, "legendFormat": "{{cargo_features}}", "range": false, @@ -578,7 +566,7 @@ "overrides": [] }, "gridPos": { - "h": 8, + "h": 9, "w": 8, "x": 0, "y": 4 @@ -589,9 +577,7 @@ "minVizWidth": 75, "orientation": "auto", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -599,16 +585,16 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_network_connected_peers{instance=~\"$instance\"}", + "expr": "reth_network_connected_peers{job=\"$job\"}", "instant": true, "legendFormat": "__auto", "range": false, @@ -645,7 +631,7 @@ "overrides": [] }, "gridPos": { - "h": 8, + "h": 9, "w": 8, "x": 8, "y": 4 @@ -660,14 +646,12 @@ "showLegend": false }, "maxVizHeight": 300, - "minVizHeight": 10, - "minVizWidth": 0, + "minVizHeight": 16, + "minVizWidth": 8, "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": [ - "last" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -675,16 +659,16 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "11.5.3", + 
"pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_sync_checkpoint{instance=~\"$instance\"}", + "expr": "reth_sync_checkpoint{job=\"$job\"}", "instant": true, "legendFormat": "{{stage}}", "range": false, @@ -754,7 +738,7 @@ "overrides": [] }, "gridPos": { - "h": 8, + "h": 9, "w": 8, "x": 16, "y": 4 @@ -767,9 +751,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -777,15 +759,15 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(reth_db_table_size{instance=~\"$instance\"})", + "expr": "sum(reth_db_table_size{job=\"$job\"})", "legendFormat": "Database", "range": true, "refId": "A" @@ -796,7 +778,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_db_freelist{instance=~\"$instance\"} * reth_db_page_size{instance=~\"$instance\"})", + "expr": "sum(reth_db_freelist{job=\"$job\"} * reth_db_page_size{job=\"$job\"})", "hide": false, "instant": false, "legendFormat": "Freelist", @@ -806,10 +788,10 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(reth_static_files_segment_size{instance=~\"$instance\"})", + "expr": "sum(reth_static_files_segment_size{job=\"$job\"})", "hide": false, "instant": false, "legendFormat": "Static Files", @@ -822,7 +804,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_db_table_size{instance=~\"$instance\"}) + sum(reth_db_freelist{instance=~\"$instance\"} * reth_db_page_size{instance=~\"$instance\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\"})", + "expr": "sum(reth_db_table_size{job=\"$job\"}) + sum(reth_db_freelist{job=\"$job\"} * reth_db_page_size{job=\"$job\"}) + sum(reth_static_files_segment_size{job=\"$job\"})", "hide": false, "instant": false, "legendFormat": "Total", @@ -837,7 +819,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -897,7 +879,7 @@ "h": 8, "w": 12, "x": 0, - "y": 12 + "y": 13 }, "id": 69, "options": { @@ -913,7 +895,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -921,7 +903,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_entities_processed{instance=~\"$instance\"} / reth_sync_entities_total{instance=~\"$instance\"}", + "expr": "reth_sync_entities_processed{job=\"$job\"} / reth_sync_entities_total{job=\"$job\"}", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -933,7 +915,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -994,7 +976,7 @@ "h": 8, "w": 12, "x": 12, - "y": 12 + "y": 13 }, "id": 12, "options": { @@ -1010,7 +992,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -1018,7 +1000,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_checkpoint{instance=~\"$instance\"}", + "expr": "reth_sync_checkpoint{job=\"$job\"}", "legendFormat": "{{stage}}", "range": true, "refId": 
"A" @@ -1030,110 +1012,9 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Tracks the number of critical tasks currently ran by the executor.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "semi-dark-red", - "value": 0 - } - ] - }, - "unit": "tasks" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 20 - }, - "id": 248, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "reth_executor_spawn_critical_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{instance=\"$instance\"}", - "hide": false, - "instant": false, - "legendFormat": "Tasks running", - "range": true, - "refId": "C" - } - ], - "title": "Task Executor critical tasks", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Tracks the number of regular tasks currently ran by the executor.", + "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", "fieldConfig": { "defaults": { "color": { @@ -1181,23 +1062,31 @@ "value": null }, { - "color": "semi-dark-red", + "color": "red", "value": 80 } ] }, - "unit": "tasks/s" + "unit": "s" }, "overrides": [ { "matcher": { - "id": "byFrameRefID", - "options": "C" + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } }, "properties": [ { - "id": "unit", - "value": "tasks" + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } } ] } @@ -1206,10 +1095,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 20 + "x": 0, + "y": 21 }, - "id": 247, + "id": 211, "options": { "legend": { "calcs": [], @@ -1223,7 +1112,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -1232,13 +1121,10 @@ }, "disableTextWrap": false, "editorMode": "builder", - "exemplar": false, - "expr": "rate(reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}[$__rate_interval])", + "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0\"}", "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Tasks started", + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 min", "range": true, "refId": "A", "useBackend": false @@ -1246,106 +1132,327 @@ { "datasource": { "type": "prometheus", - "uid": 
"${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_regular_tasks_total{instance=\"$instance\"}", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, "hide": false, - "instant": false, - "legendFormat": "Tasks running", + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p50", "range": true, - "refId": "C" - } - ], - "title": "Task Executor regular tasks", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 38, - "panels": [], - "repeat": "instance", - "title": "Database", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The average commit time for database transactions. Generally, this should not be a limiting factor in syncing.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic", - "seriesBy": "last" + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "points", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "mappings": [], - "thresholds": { + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p95", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p99", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 min", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + 
"editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p50", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p90", + "range": true, + "refId": "H", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p95", + "range": true, + "refId": "I", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p99", + "range": true, + "refId": "J", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 min", + "range": true, + "refId": "K", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p50", + "range": true, + "refId": "L", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p90", + "range": true, + "refId": "M", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p95", + "range": true, + "refId": "N", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "range": true, + "refId": "O", + "useBackend": false + } + ], + "title": "Engine API 
forkchoiceUpdated Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Latency histogram for the engine_newPayload RPC API", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] }, "unit": "s" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 29 + "x": 12, + "y": 21 }, - "id": 40, + "id": 210, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "hideZeros": false, @@ -1353,1298 +1460,402 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "avg(rate(reth_database_transaction_close_duration_seconds_sum{instance=~\"$instance\", outcome=\"commit\"}[$__rate_interval]) / rate(reth_database_transaction_close_duration_seconds_count{instance=~\"$instance\", outcome=\"commit\"}[$__rate_interval]) >= 0)", - "format": "time_series", - "instant": false, - "legendFormat": "Commit time", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 min", "range": true, - "refId": "A" - } - ], - "title": "Average commit time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "scaleDistribution": { - "type": "linear" - } - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 29 - }, - "id": 42, - "maxDataPoints": 25, - "options": { - "calculate": false, - "cellGap": 1, - "cellValues": { - "unit": "s" - }, - "color": { - "exponent": 0.2, - "fill": "dark-orange", - "min": 0, - "mode": "opacity", - "reverse": false, - "scale": "exponential", - "scheme": "Oranges", - "steps": 128 - }, - "exemplars": { - "color": "rgba(255,0,255,0.7)" - }, - "filterValues": { - "le": 1e-9 - }, - "legend": { - "show": true - }, - "rowsFrame": { - "layout": "auto", - "value": "Commit time" - }, - "tooltip": { - "mode": "single", - "showColorScale": false, - "yHistogram": false - }, - "yAxis": { - "axisLabel": "Quantile", - "axisPlacement": "left", 
- "reverse": false, - "unit": "percentunit" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "avg(max_over_time(reth_database_transaction_close_duration_seconds{instance=~\"$instance\", outcome=\"commit\"}[$__rate_interval])) by (quantile)", - "format": "time_series", - "instant": false, - "legendFormat": "{{quantile}}", - "range": true, - "refId": "A" - } - ], - "title": "Commit time heatmap", - "type": "heatmap" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The average time a database transaction was open.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic", - "seriesBy": "last" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "points", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 37 - }, - "id": 117, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(reth_database_transaction_open_duration_seconds_sum{instance=~\"$instance\", outcome!=\"\"}[$__rate_interval]) / rate(reth_database_transaction_open_duration_seconds_count{instance=~\"$instance\", outcome!=\"\"}[$__rate_interval])) by (outcome, mode)", - "format": "time_series", - "instant": false, - "legendFormat": "{{mode}}, {{outcome}}", - "range": true, - "refId": "A" - } - ], - "title": "Average transaction open time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The maximum time the database transaction was open.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "points", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - 
"y": 37 - }, - "id": 116, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "max(max_over_time(reth_database_transaction_open_duration_seconds{instance=~\"$instance\", outcome!=\"\", quantile=\"1\"}[$__interval])) by (outcome, mode)", - "format": "time_series", - "instant": false, - "legendFormat": "{{mode}}, {{outcome}}", - "range": true, - "refId": "A" - } - ], - "title": "Max transaction open time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "txs", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "Diff(opened-closed)" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 0, - 10 - ], - "fill": "dot" - } - }, - { - "id": "custom.axisLabel", - "value": "diff" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 45 - }, - "id": 119, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "code", - "exemplar": false, - "expr": "sum(reth_database_transaction_opened_total{instance=~\"$instance\", mode=\"read-write\"})", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Opened", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(reth_database_transaction_closed_total{instance=~\"$instance\", mode=\"read-write\"})", - "format": "time_series", - "instant": false, - "legendFormat": "Closed {{mode}}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "__expr__", - "uid": "${DS_EXPRESSION}" - }, - "expression": "${A} - ${B}", - "hide": false, - "refId": "Diff(opened-closed)", - "type": "math" - } - ], - "title": "Number of read-write transactions", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": 
"${datasource}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "txs", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "Diff(opened, closed)" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 0, - 10 - ], - "fill": "dot" - } - }, - { - "id": "custom.axisLabel", - "value": "diff" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 45 - }, - "id": 250, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_database_transaction_opened_total{instance=~\"$instance\", mode=\"read-only\"}", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Opened", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "sum(reth_database_transaction_closed_total{instance=~\"$instance\", mode=\"read-only\"})", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Closed {{mode}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "__expr__", - "uid": "${DS_EXPRESSION}" - }, - "expression": "${A} - ${B}", - "hide": false, - "refId": "Diff(opened, closed)", - "type": "math" - } - ], - "title": "Number of read-only transactions", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The size of tables in the database", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [], - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 53 - }, - "id": 48, - "options": { - "displayLabels": [ - "name" - ], - "legend": { - "displayMode": "table", - "placement": "right", - "showLegend": true, - "values": [ - "value" - ] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "tooltip": { - "hideZeros": false, - 
"mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_db_table_size{instance=~\"$instance\"}", - "interval": "", - "legendFormat": "{{table}}", - "range": true, - "refId": "A" - } - ], - "title": "Database tables", - "type": "piechart" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The maximum time the database transaction operation which inserts a large value took.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "points", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 53 - }, - "id": 118, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "refId": "A", + "useBackend": false }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "max(max_over_time(reth_database_operation_large_value_duration_seconds{instance=~\"$instance\", quantile=\"1\"}[$__interval]) > 0) by (table)", - "format": "time_series", - "instant": false, - "legendFormat": "{{table}}", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p50", "range": true, - "refId": "A" - } - ], - "title": "Max insertion operation time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The type of the pages in the database:\n\n- **Leaf** pages contain KV pairs.\n- **Branch** pages contain information about keys in the leaf pages\n- **Overflow** pages store large values and should generally be avoided if possible", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" }, - "mappings": [], - "unit": "short" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p95", + "range": true, + "refId": "D", + "useBackend": false }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 61 - }, - "id": 50, - "options": { - "legend": { - "displayMode": "table", - "placement": "right", - "showLegend": true, - "values": [ - "value" - ] + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p99", + "range": true, + "refId": "E", + "useBackend": false }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 min", + "range": true, + "refId": "F", + "useBackend": false }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by (type) ( reth_db_table_pages{instance=~\"$instance\"} )", - "legendFormat": "__auto", + "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p50", "range": true, - "refId": "A" - } - ], - "title": "Database pages", - "type": "piechart" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The size of the database over time", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "decimals": 4, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p90", + "range": true, + "refId": "H", + "useBackend": false + }, + { + "datasource": 
{ + "type": "prometheus", + "uid": "${datasource}" }, - "unit": "bytes" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p95", + "range": true, + "refId": "I", + "useBackend": false }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 61 - }, - "id": 52, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p99", + "range": true, + "refId": "J", + "useBackend": false }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "sum by (job) ( reth_db_table_size{instance=~\"$instance\"} )", - "legendFormat": "Size ({{job}})", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 min", "range": true, - "refId": "A" - } - ], - "title": "Database growth", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "The number of pages on the MDBX freelist", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + "refId": "K", + "useBackend": false }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 69 - }, - "id": 113, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p50", + "range": true, + "refId": "L", + "useBackend": false }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.3", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": 
"sum(reth_db_freelist{instance=~\"$instance\"}) by (job)", - "legendFormat": "Pages ({{job}})", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p90", "range": true, - "refId": "A" - } - ], - "title": "Freelist", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "left", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "refId": "M", + "useBackend": false }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "__name__" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "matcher": { - "id": "byName", - "options": "instance" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p95", + "range": true, + "refId": "N", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" }, - { - "matcher": { - "id": "byName", - "options": "job" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p99", + "range": true, + "refId": "O", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "matcher": { - "id": "byName", - "options": "type" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 min", + "range": true, + "refId": "P", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" }, - { - "matcher": { - "id": "byName", - "options": "Value" - }, - "properties": [ - { - "id": "unit", - "value": "locale" - }, - { - "id": "displayName", - "value": "Overflow pages" - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p50", + "range": true, + "refId": "Q", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "matcher": { - "id": "byName", - "options": "table" - }, - "properties": [ - { - "id": "displayName", - "value": "Table" - } - ] - } - ] - }, - "gridPos": { - "h": 8, 
- "w": 12, - "x": 12, - "y": 69 - }, - "id": 58, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p90", + "range": true, + "refId": "R", + "useBackend": false }, - "showHeader": true - }, - "pluginVersion": "11.5.3", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "sort_desc(reth_db_table_pages{instance=~\"$instance\", type=\"overflow\"} != 0)", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p95", + "range": true, + "refId": "S", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p99", + "range": true, + "refId": "T", + "useBackend": false } ], - "title": "Overflow pages by table", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 77 - }, - "id": 203, - "panels": [], - "title": "Static Files", - "type": "row" + "title": "Engine API newPayload Latency", + "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "The size of segments in the static files", + "description": "The throughput of the node's executor. 
The metric is the amount of gas processed in a block, divided by the time it took to process the block.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, "mappings": [], - "unit": "bytes" + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "si: gas/s" }, "overrides": [] }, "gridPos": { "h": 8, - "w": 8, + "w": 12, "x": 0, - "y": 78 + "y": 29 }, - "id": 202, + "id": 56, "options": { - "displayLabels": [ - "name" - ], "legend": { - "displayMode": "table", - "placement": "right", - "showLegend": true, - "values": [ - "value" - ] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { "hideZeros": false, @@ -2652,23 +1863,118 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_sync_execution_gas_per_second{job=\"$job\"}", + "legendFormat": "Gas/s", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[1m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1m)", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[5m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (5m)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[10m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (10m)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[30m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (30m)", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + 
"disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[1h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1h)", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_static_files_segment_size{instance=~\"$instance\"}", - "interval": "", - "legendFormat": "{{segment}}", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[24h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (24h)", "range": true, - "refId": "A" + "refId": "G", + "useBackend": false } ], - "title": "Segments size", - "type": "piechart" + "title": "Execution throughput", + "type": "timeseries" }, { "datasource": { @@ -2678,170 +1984,183 @@ "fieldConfig": { "defaults": { "color": { - "mode": "thresholds" + "mode": "palette-classic" }, "custom": { - "align": "left", - "cellOptions": { - "type": "auto" + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "inspect": false + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value" - }, - "properties": [ - { - "id": "unit", - "value": "locale" - }, - { - "id": "displayName", - "value": "Entries" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "segment" - }, - "properties": [ - { - "id": "displayName", - "value": "Segment" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "instance" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "job" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] }, - { - "matcher": { - "id": "byName", - "options": "__name__" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - } - ] + "unit": "s" + }, + "overrides": [] }, "gridPos": { "h": 8, - "w": 8, - "x": 8, - "y": 78 + "w": 12, + "x": 12, + "y": 29 }, - "id": 204, + "id": 240, "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "showHeader": true + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": 
"builder", + "expr": "reth_sync_block_validation_state_root_duration{job=\"$job\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration", + "range": true, + "refId": "A", + "useBackend": false + }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "reth_static_files_segment_entries{instance=~\"$instance\"}", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_execution_execution_duration{job=\"$job\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution Duration", + "range": true, + "refId": "B", + "useBackend": false } ], - "title": "Entries per segment", - "type": "table" + "title": "Block Processing Latency", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 87, + "panels": [], + "repeat": "instance", + "title": "Engine API", + "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", "fieldConfig": { "defaults": { "color": { - "mode": "thresholds" + "mode": "palette-classic" }, "custom": { - "align": "left", - "cellOptions": { - "type": "auto" + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "inspect": false + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2850,105 +2169,29 @@ ] } }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value" - }, - "properties": [ - { - "id": "unit", - "value": "locale" - }, - { - "id": "displayName", - "value": "Files" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "segment" - }, - "properties": [ - { - "id": "displayName", - "value": "Segment" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "instance" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "job" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "__name__" - }, - "properties": [ - { - "id": "custom.hidden", - "value": true - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, - "w": 8, - "x": 16, - "y": 78 + "w": 12, + "x": 0, + "y": 38 }, - "id": 205, + "id": 84, "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false + "legend": { + "calcs": [], + "displayMode": 
"list", + "placement": "bottom", + "showLegend": true }, - "showHeader": true + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -2956,24 +2199,33 @@ "uid": "${datasource}" }, "editorMode": "code", - "exemplar": false, - "expr": "reth_static_files_segment_files{instance=~\"$instance\"}", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, + "expr": "rate(reth_consensus_engine_beacon_forkchoice_updated_messages{job=\"$job\"}[$__rate_interval])", + "legendFormat": "forkchoiceUpdated", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{job=\"$job\"}[$__rate_interval])", + "hide": false, + "legendFormat": "newPayload", + "range": true, + "refId": "B" } ], - "title": "Files per segment", - "type": "table" + "title": "Engine API messages", + "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "The size of the static files over time", + "description": "Counts the number of failed response deliveries due to client request termination.", "fieldConfig": { "defaults": { "color": { @@ -3002,7 +2254,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -3017,7 +2269,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3025,17 +2278,17 @@ } ] }, - "unit": "bytes" + "unit": "none" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 86 + "x": 12, + "y": 38 }, - "id": 206, + "id": 249, "options": { "legend": { "calcs": [], @@ -3049,29 +2302,40 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum by (job) ( reth_static_files_segment_size{instance=~\"$instance\"} )", - "legendFormat": "__auto", + "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{job=\"$job\"}[$__rate_interval])", + "legendFormat": "newPayload", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{job=\"$job\"}[$__rate_interval])", + "legendFormat": "forkchoiceUpdated", + "range": true, + "refId": "B" } ], - "title": "Static Files growth", + "title": "Failed Engine API Response Deliveries", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "The maximum time the static files operation which commits a writer took.", + "description": "Latency histogram for the engine_newPayload to engine_forkchoiceUpdated", "fieldConfig": { "defaults": { "color": { @@ -3085,7 +2349,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "points", + "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -3100,7 +2364,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -3115,7 +2379,8 @@ "mode": "absolute", "steps": [ { - "color": 
"green" + "color": "green", + "value": null }, { "color": "red", @@ -3130,10 +2395,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 86 + "x": 0, + "y": 46 }, - "id": 207, + "id": 213, "options": { "legend": { "calcs": [], @@ -3147,7 +2412,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -3155,35 +2420,21 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{instance=~\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", - "legendFormat": "{{segment}}", + "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{job=\"$job\"}", + "legendFormat": "p{{quantile}}", "range": true, "refId": "A" } ], - "title": "Max writer commit time", + "title": "Engine API latency between forkchoiceUpdated and newPayload", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 94 - }, - "id": 46, - "panels": [], - "repeat": "instance", - "title": "Execution", - "type": "row" - }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "The throughput of the node's executor. The metric is the amount of gas processed in a block, divided by the time it took to process the block.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", + "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", "fieldConfig": { "defaults": { "color": { @@ -3227,21 +2478,26 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } ] }, - "unit": "si: gas/s" + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, - "w": 24, - "x": 0, - "y": 95 + "w": 12, + "x": 12, + "y": 46 }, - "id": 56, + "id": 212, "options": { "legend": { "calcs": [], @@ -3255,18 +2511,39 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "reth_sync_execution_gas_per_second{instance=~\"$instance\"}", - "legendFormat": "Gas/s", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 min", "range": true, - "refId": "A" + "refId": "O", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p50", + "range": true, + "refId": "A", + "useBackend": false }, { "datasource": { @@ -3275,11 +2552,11 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1m])", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (1m)", + "legendFormat": "engine_getPayloadBodiesByHashV1 p90", "range": true, "refId": 
"B", "useBackend": false @@ -3287,17 +2564,49 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p95", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p99", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[5m])", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (5m)", + "legendFormat": "engine_getPayloadBodiesByRangeV1 min", "range": true, - "refId": "C", + "refId": "E", "useBackend": false }, { @@ -3307,29 +2616,29 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[10m])", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (10m)", + "legendFormat": "engine_getPayloadBodiesByRangeV1 p50", "range": true, - "refId": "D", + "refId": "F", "useBackend": false }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[30m])", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (30m)", + "legendFormat": "engine_getPayloadBodiesByRangeV1 p90", "range": true, - "refId": "E", + "refId": "G", "useBackend": false }, { @@ -3339,33 +2648,33 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1h])", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (1h)", + "legendFormat": "engine_getPayloadBodiesByRangeV1 p95", "range": true, - "refId": "F", + "refId": "H", "useBackend": false }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[24h])", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "Avg Gas/s (24h)", + "legendFormat": "engine_getPayloadBodiesByRangeV1 p99", "range": true, - "refId": "G", + "refId": "I", 
"useBackend": false } ], - "title": "Execution throughput", + "title": "Engine API getPayloadBodies Latency", "type": "timeseries" }, { @@ -3387,7 +2696,7 @@ "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", - "fillOpacity": 25, + "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, @@ -3401,11 +2710,11 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", - "mode": "percent" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -3416,31 +2725,28 @@ "mode": "absolute", "steps": [ { - "color": "green" - }, - { - "color": "red", - "value": 80 + "color": "green", + "value": null } ] }, - "unit": "s" + "unit": "none" }, "overrides": [] }, "gridPos": { - "h": 11, - "w": 24, + "h": 8, + "w": 12, "x": 0, - "y": 103 + "y": 54 }, - "id": 240, + "id": 1000, "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "right", - "showLegend": false + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { "hideZeros": false, @@ -3448,49 +2754,39 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "State Root Duration", + "editorMode": "code", + "expr": "rate(reth_engine_rpc_blobs_blob_count{job=\"$job\"}[$__rate_interval])", + "legendFormat": "Found", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{instance=\"$instance\"}", - "fullMetaSearch": false, + "editorMode": "code", + "expr": "rate(reth_engine_rpc_blobs_blob_misses{job=\"$job\"}[$__rate_interval])", "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Execution Duration", + "legendFormat": "Missed", "range": true, - "refId": "B", - "useBackend": false + "refId": "B" } ], - "title": "Block Processing Latency", + "title": "Blob Count and Misses", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3535,7 +2831,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3543,23 +2840,23 @@ } ] }, - "unit": "percentunit" + "unit": "s" }, "overrides": [] }, "gridPos": { - "h": 11, - "w": 24, - "x": 0, - "y": 114 + "h": 8, + "w": 12, + "x": 12, + "y": 54 }, - "id": 251, + "id": 258, "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "right", - "showLegend": false + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { "hideZeros": false, @@ -3567,7 +2864,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -3575,13 +2872,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "code", - "expr": "reth_sync_caching_account_cache_hits{instance=\"$instance\"} / (reth_sync_caching_account_cache_hits{instance=\"$instance\"} + reth_sync_caching_account_cache_misses{instance=\"$instance\"})", + 
"editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "instant": false, - "legendFormat": "Account cache hits", + "legendFormat": "engine_getBlobsV1 p50", "range": true, "refId": "A", "useBackend": false @@ -3589,16 +2885,15 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, - "editorMode": "code", - "expr": "reth_sync_caching_storage_cache_hits{instance=\"$instance\"} / (reth_sync_caching_storage_cache_hits{instance=\"$instance\"} + reth_sync_caching_storage_cache_misses{instance=\"$instance\"})", + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "instant": false, - "legendFormat": "Storage cache hits", + "legendFormat": "engine_getBlobsV1 p95", "range": true, "refId": "B", "useBackend": false @@ -3609,27 +2904,58 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "code", - "expr": "reth_sync_caching_code_cache_hits{instance=\"$instance\"} / (reth_sync_caching_code_cache_hits{instance=\"$instance\"} + reth_sync_caching_code_cache_misses{instance=\"$instance\"})", + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "instant": false, - "legendFormat": "Code cache hits", + "legendFormat": "engine_getBlobsV1 p99", "range": true, "refId": "C", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 min", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"1\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 max", + "range": true, + "refId": "E", + "useBackend": false } ], - "title": "Execution cache hitrate", + "title": "Engine API getBlobs Latency", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "The time it takes for operations that are part of block validation, but not execution or state root, to complete.", + "description": "Total pipeline runs triggered by the sync controller", "fieldConfig": { "defaults": { "color": { @@ -3673,25 +2999,25 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "s" + } }, "overrides": [] }, "gridPos": { - "h": 11, - "w": 24, + "h": 8, + "w": 12, "x": 0, - "y": 125 + "y": 62 }, - "id": 252, + "id": 85, "options": { "legend": { "calcs": [], @@ -3705,48 +3031,29 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "reth_sync_block_validation_trie_input_duration{instance=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", - 
"fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Trie input creation duration p{{quantile}}", + "editorMode": "builder", + "expr": "reth_consensus_engine_beacon_pipeline_runs{job=\"$job\"}", + "legendFormat": "Pipeline runs", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" } ], - "title": "Block validation overhead", + "title": "Pipeline runs", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 136 - }, - "id": 24, - "panels": [], - "repeat": "instance", - "title": "Downloader: Headers", - "type": "row" - }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -3790,7 +3097,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3799,40 +3107,15 @@ ] } }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "D" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 137 + "x": 12, + "y": 62 }, - "id": 26, + "id": 83, "options": { "legend": { "calcs": [], @@ -3842,70 +3125,46 @@ }, "tooltip": { "hideZeros": false, - "mode": "multi", + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_headers_total_downloaded{instance=~\"$instance\"}", - "legendFormat": "Downloaded", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_headers_total_flushed{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Flushed", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_total_downloaded{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "instant": false, - "legendFormat": "Downloaded/s", - "range": true, - "refId": "C" - }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_total_flushed{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Flushed/s", + "expr": "reth_consensus_engine_beacon_active_block_downloads{job=\"$job\"}", + "legendFormat": "Active block downloads", "range": true, - "refId": "D" + "refId": "A" } ], - "title": "I/O", + "title": "Active block downloads", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 70 + }, + "id": 46, + "panels": [], + "repeat": "instance", + "title": "Execution", + "type": "row" + }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Internal errors in the header downloader. 
These are expected to happen from time to time.", "fieldConfig": { "defaults": { "color": { @@ -3920,7 +3179,7 @@ "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", - "fillOpacity": 0, + "fillOpacity": 24, "gradientMode": "none", "hideFrom": { "legend": false, @@ -3938,7 +3197,7 @@ "spanNulls": false, "stacking": { "group": "A", - "mode": "none" + "mode": "percent" }, "thresholdsStyle": { "mode": "off" @@ -3949,7 +3208,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3957,17 +3217,17 @@ } ] }, - "unit": "cps" + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 137 + "x": 0, + "y": 71 }, - "id": 33, + "id": 1001, "options": { "legend": { "calcs": [], @@ -3981,45 +3241,43 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", - "legendFormat": "Request timed out", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Unexpected error", + "expr": "reth_sync_block_validation_state_root_duration{job=\"$job\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration", "range": true, - "refId": "B" + "refId": "A", + "useBackend": false }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_validation_errors{instance=~\"$instance\"}[$__rate_interval])", + "expr": "reth_sync_execution_execution_duration{job=\"$job\"}", + "fullMetaSearch": false, "hide": false, - "legendFormat": "Invalid response", + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution Duration", "range": true, - "refId": "C" + "refId": "B", + "useBackend": false } ], - "title": "Errors", + "title": "Block Processing Latency", "type": "timeseries" }, { @@ -4027,7 +3285,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "The number of connected peers and in-progress requests for headers.", "fieldConfig": { "defaults": { "color": { @@ -4071,24 +3328,26 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "percentunit" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 145 + "x": 12, + "y": 71 }, - "id": 36, + "id": 251, "options": { "legend": { "calcs": [], @@ -4097,59 +3356,74 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_downloaders_headers_in_flight_requests{instance=~\"$instance\"}", - "legendFormat": "In flight requests", + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_account_cache_hits{job=\"$job\"} / (reth_sync_caching_account_cache_hits{job=\"$job\"} + 
reth_sync_caching_account_cache_misses{job=\"$job\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Account cache hits", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "builder", - "expr": "reth_network_connected_peers{instance=~\"$instance\"}", + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_storage_cache_hits{job=\"$job\"} / (reth_sync_caching_storage_cache_hits{job=\"$job\"} + reth_sync_caching_storage_cache_misses{job=\"$job\"})", + "fullMetaSearch": false, "hide": false, - "legendFormat": "Connected peers", + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Storage cache hits", "range": true, - "refId": "B" + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_code_cache_hits{job=\"$job\"} / (reth_sync_caching_code_cache_hits{job=\"$job\"} + reth_sync_caching_code_cache_misses{job=\"$job\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Code cache hits", + "range": true, + "refId": "C", + "useBackend": false } ], - "title": "Requests", + "title": "Execution cache hitrate", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 153 - }, - "id": 32, - "panels": [], - "repeat": "instance", - "title": "Downloader: Bodies", - "type": "row" - }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", + "description": "The time it takes for operations that are part of block validation, but not execution or state root, to complete.", "fieldConfig": { "defaults": { "color": { @@ -4193,7 +3467,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4201,157 +3476,71 @@ } ] }, - "unit": "locale" + "unit": "s" }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "unit", - "value": "ops" - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "D" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "unit", - "value": "ops" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 154 + "y": 79 }, - "id": 30, + "id": 252, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_total_downloaded{instance=~\"$instance\"}", - "legendFormat": "Downloaded", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_total_flushed{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Flushed", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": 
"${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_total_flushed{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Flushed/s", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_total_downloaded{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Downloaded/s", - "range": true, - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_responses{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Buffered responses", - "range": true, - "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_blocks{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Buffered blocks", - "range": true, - "refId": "F" + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_queued_blocks{instance=~\"$instance\"}", + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_block_validation_trie_input_duration{job=\"$job\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "fullMetaSearch": false, "hide": false, - "legendFormat": "Queued blocks", + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Trie input creation duration p{{quantile}}", "range": true, - "refId": "G" + "refId": "A", + "useBackend": false } ], - "title": "I/O", + "title": "Block validation overhead", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 87 + }, + "id": 214, + "panels": [], + "title": "State Root Task", + "type": "row" + }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "Internal errors in the bodies downloader. 
These are expected to happen from time to time.", "fieldConfig": { "defaults": { "color": { @@ -4391,26 +3580,29 @@ } }, "mappings": [], - "min": 0, "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } ] - }, - "unit": "cps" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 154 + "x": 0, + "y": 88 }, - "id": 28, + "id": 255, "options": { "legend": { "calcs": [], @@ -4419,49 +3611,27 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", - "legendFormat": "Request timed out", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Unexpected error", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_validation_errors{instance=~\"$instance\"}[$__rate_interval])", - "hide": false, - "legendFormat": "Invalid response", + "editorMode": "code", + "expr": "reth_tree_root_proofs_processed_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", "range": true, - "refId": "C" + "refId": "Branch Nodes" } ], - "title": "Errors", + "title": "Proofs Processed", "type": "timeseries" }, { @@ -4469,7 +3639,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { "defaults": { "color": { @@ -4513,24 +3682,26 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 162 + "x": 12, + "y": 88 }, - "id": 35, + "id": 254, "options": { "legend": { "calcs": [], @@ -4539,37 +3710,27 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_in_flight_requests{instance=~\"$instance\"}", - "legendFormat": "In flight requests", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_network_connected_peers{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Connected peers", + "editorMode": "code", + "expr": "reth_tree_root_proof_calculation_duration_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", "range": true, - "refId": "B" + "refId": "Branch Nodes" } ], - "title": "Requests", + "title": "Proof calculation duration", "type": "timeseries" }, { @@ -4577,7 +3738,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "The 
number of blocks and size in bytes of those blocks", "fieldConfig": { "defaults": { "color": { @@ -4621,7 +3781,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4629,34 +3790,17 @@ } ] }, - "unit": "bytes" + "unit": "none" }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "unit", - "value": "blocks" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 162 + "x": 0, + "y": 96 }, - "id": 73, + "id": 257, "options": { "legend": { "calcs": [], @@ -4665,38 +3809,27 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Buffered blocks size ", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_blocks{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Buffered blocks", + "editorMode": "code", + "expr": "reth_tree_root_pending_multiproofs_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", "range": true, - "refId": "B" + "refId": "Branch Nodes" } ], - "title": "Downloader buffer", + "title": "Pending MultiProof requests", "type": "timeseries" }, { @@ -4704,7 +3837,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { "defaults": { "color": { @@ -4748,7 +3880,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4756,34 +3889,17 @@ } ] }, - "unit": "bytes" + "unit": "none" }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "unit", - "value": "blocks" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 170 + "x": 12, + "y": 96 }, - "id": 102, + "id": 256, "options": { "legend": { "calcs": [], @@ -4792,73 +3908,34 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_size_bytes{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Response size", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Individual response length (number of bodies in response)", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": 
"reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", - "hide": false, + "editorMode": "code", + "expr": "reth_tree_root_inflight_multiproofs_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, - "legendFormat": "Mean body size in response", - "range": true, - "refId": "C" - } - ], - "title": "Block body response sizes", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 178 - }, - "id": 79, - "panels": [], - "repeat": "instance", - "title": "Blockchain Tree", - "type": "row" + "legendFormat": "{{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "In-flight MultiProof requests", + "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "The block number of the tip of the canonical chain from the blockchain tree.", "fieldConfig": { "defaults": { "color": { @@ -4902,14 +3979,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "none" }, "overrides": [] }, @@ -4917,9 +3996,9 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 104 }, - "id": 74, + "id": 260, "options": { "legend": { "calcs": [], @@ -4928,26 +4007,27 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_blockchain_tree_canonical_chain_height{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Canonical chain height", + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Storage {{quantile}} percentile", "range": true, - "refId": "B" + "refId": "Branch Nodes" } ], - "title": "Canonical chain height", + "title": "Redundant multiproof storage nodes", "type": "timeseries" }, { @@ -4955,7 +4035,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Total number of blocks in the tree's block buffer", "fieldConfig": { "defaults": { "color": { @@ -4999,14 +4078,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "none" }, "overrides": [] }, @@ -5014,9 +4095,9 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 104 }, - "id": 80, + "id": 259, "options": { "legend": { "calcs": [], @@ -5025,26 +4106,27 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_blockchain_tree_block_buffer_blocks{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Buffered blocks", + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Account {{quantile}} percentile", "range": true, - "refId": "B" + "refId": "Branch Nodes" } ], - "title": "Block buffer blocks", + "title": "Redundant multiproof account nodes", "type": 
"timeseries" }, { @@ -5052,7 +4134,6 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Total number of sidechains in the blockchain tree", "fieldConfig": { "defaults": { "color": { @@ -5096,14 +4177,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "none" }, "overrides": [] }, @@ -5111,9 +4194,9 @@ "h": 8, "w": 12, "x": 0, - "y": 187 + "y": 112 }, - "id": 81, + "id": 262, "options": { "legend": { "calcs": [], @@ -5122,26 +4205,28 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_blockchain_tree_sidechains{instance=~\"$instance\"}", + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "hide": false, - "legendFormat": "Total number of sidechains", + "instant": false, + "legendFormat": "Account {{quantile}} percentile", "range": true, - "refId": "B" + "refId": "A" } ], - "title": "Sidechains", + "title": "Total multiproof account nodes", "type": "timeseries" }, { @@ -5192,7 +4277,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5200,7 +4286,7 @@ } ] }, - "unit": "s" + "unit": "none" }, "overrides": [] }, @@ -5208,37 +4294,38 @@ "h": 8, "w": 12, "x": 12, - "y": 187 + "y": 112 }, - "id": 114, + "id": 261, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(reth_consensus_engine_beacon_make_canonical_committed_latency_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_consensus_engine_beacon_make_canonical_committed_latency_count{instance=~\"$instance\"}[$__rate_interval])", + "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, - "legendFormat": "__auto", + "legendFormat": "Storage {{quantile}} percentile", "range": true, - "refId": "A" + "refId": "Branch Nodes" } ], - "title": "Canonical Commit Latency time", + "title": "Total multiproof storage nodes", "type": "timeseries" }, { @@ -5246,6 +4333,7 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "How much time is spent in the multiproof task", "fieldConfig": { "defaults": { "color": { @@ -5289,7 +4377,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5297,7 +4386,7 @@ } ] }, - "unit": "none" + "unit": "s" }, "overrides": [] }, @@ -5305,37 +4394,39 @@ "h": 8, "w": 12, "x": 12, - "y": 195 + "y": 120 }, - "id": 190, + "id": 263, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": 
"${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_blockchain_tree_latest_reorg_depth{instance=~\"$instance\"}", + "expr": "reth_tree_root_multiproof_task_total_duration_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "hide": false, "instant": false, - "legendFormat": "__auto", + "legendFormat": "Task duration {{quantile}} percentile", "range": true, "refId": "A" } ], - "title": "Latest Reorg Depth", + "title": "Proof fetching total duration", "type": "timeseries" }, { @@ -5344,12 +4435,12 @@ "h": 1, "w": 24, "x": 0, - "y": 203 + "y": 128 }, - "id": 87, + "id": 38, "panels": [], "repeat": "instance", - "title": "Engine API", + "title": "Database", "type": "row" }, { @@ -5357,11 +4448,12 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "", + "description": "The average commit time for database transactions. Generally, this should not be a limiting factor in syncing.", "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" + "mode": "palette-classic", + "seriesBy": "last" }, "custom": { "axisBorderShow": false, @@ -5371,7 +4463,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -5401,14 +4493,12 @@ "mode": "absolute", "steps": [ { - "color": "green" - }, - { - "color": "red", - "value": 80 + "color": "green", + "value": null } ] - } + }, + "unit": "s" }, "overrides": [] }, @@ -5416,36 +4506,40 @@ "h": 8, "w": 12, "x": 0, - "y": 204 + "y": 129 }, - "id": 83, + "id": 40, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + "showLegend": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_active_block_downloads{instance=~\"$instance\"}", - "legendFormat": "Active block downloads", + "editorMode": "code", + "exemplar": false, + "expr": "avg(rate(reth_database_transaction_close_duration_seconds_sum{job=\"$job\", outcome=\"commit\"}[$__rate_interval]) / rate(reth_database_transaction_close_duration_seconds_count{job=\"$job\", outcome=\"commit\"}[$__rate_interval]) >= 0)", + "format": "time_series", + "instant": false, + "legendFormat": "Commit time", "range": true, "refId": "A" } ], - "title": "Active block downloads", + "title": "Average commit time", "type": "timeseries" }, { @@ -5453,11 +4547,102 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 129 + }, + "id": 42, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + 
"value": "Commit time" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(max_over_time(reth_database_transaction_close_duration_seconds{job=\"$job\", outcome=\"commit\"}[$__rate_interval])) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Commit time heatmap", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "The average time a database transaction was open.", "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" + "mode": "palette-classic", + "seriesBy": "last" }, "custom": { "axisBorderShow": false, @@ -5467,7 +4652,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -5497,24 +4682,22 @@ "mode": "absolute", "steps": [ { - "color": "green" - }, - { - "color": "red", - "value": 80 + "color": "green", + "value": null } ] - } + }, + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 204 + "x": 0, + "y": 137 }, - "id": 84, + "id": 117, "options": { "legend": { "calcs": [], @@ -5523,37 +4706,29 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_forkchoice_updated_messages{instance=~\"$instance\"}", - "legendFormat": "Forkchoice updated messages", + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(reth_database_transaction_open_duration_seconds_sum{job=\"$job\", outcome!=\"\"}[$__rate_interval]) / rate(reth_database_transaction_open_duration_seconds_count{job=\"$job\", outcome!=\"\"}[$__rate_interval])) by (outcome, mode)", + "format": "time_series", + "instant": false, + "legendFormat": "{{mode}}, {{outcome}}", "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_new_payload_messages{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "New payload messages", - "range": true, - "refId": "B" } ], - "title": "Engine API messages", + "title": "Average transaction open time", "type": "timeseries" }, { @@ -5561,7 +4736,7 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Latency histogram for the engine_newPayload to Forkchoice Update", + "description": "The maximum time the database transaction was open.", "fieldConfig": { "defaults": { "color": { @@ -5575,7 +4750,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -5590,7 +4765,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -5605,11 +4780,8 @@ "mode": "absolute", "steps": [ { - "color": "green" - }, - { - 
"color": "red", - "value": 80 + "color": "green", + "value": null } ] }, @@ -5620,10 +4792,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 212 + "x": 12, + "y": 137 }, - "id": 213, + "id": 116, "options": { "legend": { "calcs": [], @@ -5632,25 +4804,29 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{instance=~\"$instance\"}", - "legendFormat": "new_payload_forkchoice_updated", + "editorMode": "code", + "exemplar": false, + "expr": "max(max_over_time(reth_database_transaction_open_duration_seconds{job=\"$job\", outcome!=\"\", quantile=\"1\"}[$__interval])) by (outcome, mode)", + "format": "time_series", + "instant": false, + "legendFormat": "{{mode}}, {{outcome}}", "range": true, "refId": "A" } ], - "title": "Engine API newPayload Forkchoice Update Latency", + "title": "Max transaction open time", "type": "timeseries" }, { @@ -5658,7 +4834,7 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Latency histogram for the engine_newPayload RPC API", + "description": "", "fieldConfig": { "defaults": { "color": { @@ -5668,7 +4844,7 @@ "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", - "axisLabel": "", + "axisLabel": "txs", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, @@ -5682,7 +4858,10 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 1, + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 3, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -5702,260 +4881,81 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 212 - }, - "id": 210, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": 
false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 min", - "range": true, - "refId": "K", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "value": 80 + } + ] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p50", - "range": true, - "refId": "L", - "useBackend": false + "unit": "none" }, - { - "datasource": { - "type": 
"prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p90", - "range": true, - "refId": "M", - "useBackend": false + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "Diff(opened-closed)" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [0, 10], + "fill": "dot" + } + }, + { + "id": "custom.axisLabel", + "value": "diff" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 145 + }, + "id": 119, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{instance=~\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "exemplar": false, + "expr": "sum(reth_database_transaction_opened_total{job=\"$job\", mode=\"read-write\"})", + "format": "time_series", "fullMetaSearch": false, - "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p95", + "instant": false, + "legendFormat": "Opened", "range": true, - "refId": "N", + "refId": "A", "useBackend": false }, { @@ -5963,79 +4963,156 @@ "type": "prometheus", "uid": "${datasource}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p99", + "editorMode": "code", + "exemplar": false, + "expr": "sum(reth_database_transaction_closed_total{job=\"$job\", mode=\"read-write\"})", + "format": "time_series", + "instant": false, + "legendFormat": "Closed {{mode}}", "range": true, - "refId": "O", - "useBackend": false + "refId": "B" }, { "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "type": "__expr__", + "uid": "${DS_EXPRESSION}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, + "expression": "${A} - ${B}", "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 min", - "range": true, - "refId": "P", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "refId": "Diff(opened-closed)", + "type": "math" + } + ], + "title": "Number of read-write transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p50", - "range": true, - "refId": "Q", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "txs", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p90", - "range": true, - "refId": "R", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "Diff(opened, closed)" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [0, 10], + "fill": "dot" + } + }, + { + "id": "custom.axisLabel", + "value": "diff" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 145 + }, + "id": 250, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{instance=~\"$instance\", quantile=\"0.95\"}", + "exemplar": false, + "expr": "reth_database_transaction_opened_total{job=\"$job\", mode=\"read-only\"}", + "format": "time_series", "fullMetaSearch": false, - "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p95", + "instant": false, + "legendFormat": "Opened", "range": true, - "refId": "S", + "refId": "A", "useBackend": false }, { @@ -6045,17 +5122,29 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{instance=~\"$instance\", quantile=\"0.99\"}", + "exemplar": false, + "expr": "sum(reth_database_transaction_closed_total{job=\"$job\", mode=\"read-only\"})", + "format": "time_series", "fullMetaSearch": false, - "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p99", + "instant": false, + "legendFormat": "Closed {{mode}}", "range": true, - "refId": "T", + "refId": "B", "useBackend": false - } + }, + { + "datasource": { + "type": "__expr__", + "uid": "${DS_EXPRESSION}" + }, + "expression": "${A} - ${B}", + "hide": false, + "refId": "Diff(opened, closed)", + "type": "math" + } ], - "title": "Engine API newPayload Latency", + "title": "Number of read-only transactions", "type": "timeseries" }, { @@ -6063,58 +5152,21 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "Total pipeline runs triggered by the sync controller", + "description": "The size of tables in the database", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { 
- "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "unit": "bytes" }, "overrides": [] }, @@ -6122,44 +5174,53 @@ "h": 8, "w": 12, "x": 0, - "y": 220 + "y": 153 }, - "id": 85, + "id": 48, "options": { + "displayLabels": ["name"], "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": ["value"] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_pipeline_runs{instance=~\"$instance\"}", - "legendFormat": "Pipeline runs", + "expr": "reth_db_table_size{job=\"$job\"}", + "interval": "", + "legendFormat": "{{table}}", "range": true, "refId": "A" } ], - "title": "Pipeline runs", - "type": "timeseries" + "title": "Database tables", + "type": "piechart" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", + "description": "The maximum time the database transaction operation which inserts a large value took.", "fieldConfig": { "defaults": { "color": { @@ -6173,7 +5234,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -6203,7 +5264,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6219,9 +5281,9 @@ "h": 8, "w": 12, "x": 12, - "y": 220 + "y": 153 }, - "id": 212, + "id": 118, "options": { "legend": { "calcs": [], @@ -6230,182 +5292,103 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByHashV1 min", - "range": true, - "refId": "O", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": 
true, - "legendFormat": "engine_getPayloadBodiesByHashV1 p50", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByHashV1 p90", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByHashV1 p95", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByHashV1 p99", + "editorMode": "code", + "exemplar": false, + "expr": "max(max_over_time(reth_database_operation_large_value_duration_seconds{job=\"$job\", quantile=\"1\"}[$__interval]) > 0) by (table)", + "format": "time_series", + "instant": false, + "legendFormat": "{{table}}", "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "refId": "A" + } + ], + "title": "Max insertion operation time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "The type of the pages in the database:\n\n- **Leaf** pages contain KV pairs.\n- **Branch** pages contain information about keys in the leaf pages\n- **Overflow** pages store large values and should generally be avoided if possible", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByRangeV1 min", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByRangeV1 p50", - "range": true, - "refId": "F", - "useBackend": false + "mappings": [], + "unit": "short" }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByRangeV1 p90", - "range": true, - "refId": 
"G", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 161 + }, + "id": 50, + "options": { + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": ["value"] }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByRangeV1 p95", - "range": true, - "refId": "H", - "useBackend": false + "pieType": "pie", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getPayloadBodiesByRangeV1 p99", + "expr": "sum by (type) ( reth_db_table_pages{job=\"$job\"} )", + "legendFormat": "__auto", "range": true, - "refId": "I", - "useBackend": false + "refId": "A" } ], - "title": "Engine API getPayloadBodies Latency", - "type": "timeseries" + "title": "Database pages", + "type": "piechart" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", + "description": "The size of the database over time", "fieldConfig": { "defaults": { "color": { @@ -6444,12 +5427,14 @@ "mode": "off" } }, + "decimals": 4, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6457,272 +5442,45 @@ } ] }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 228 - }, - "id": 211, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 
p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 min", - "range": true, - "refId": "K", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_engine_rpc_fork_choice_updated_v3{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p50", - "range": true, - "refId": "L", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{instance=~\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p90", - "range": true, - "refId": "M", - "useBackend": false + "unit": "bytes" }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p95", - "range": true, - "refId": "N", - "useBackend": false + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 161 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "editorMode": "code", + "expr": "sum by (job) ( reth_db_table_size{job=\"$job\"} )", + "legendFormat": "Size ({{job}})", "range": true, - "refId": "O", - "useBackend": false + "refId": "A" } ], - "title": "Engine API forkchoiceUpdated Latency", + "title": "Database growth", "type": "timeseries" }, { @@ -6730,6 +5488,7 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "The number of pages on the MDBX freelist", "fieldConfig": { "defaults": { "color": { @@ -6773,7 +5532,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6781,17 +5541,17 @@ } ] }, - "unit": "s" + "unit": "none" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 228 + "x": 0, + "y": 169 }, - "id": 258, + "id": 113, "options": { "legend": { "calcs": [], @@ -6800,312 +5560,416 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{instance=~\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p50", + "editorMode": "code", + "expr": "sum(reth_db_freelist{job=\"$job\"}) by (job)", + "legendFormat": "Pages ({{job}})", "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "refId": "A" + } + ], + "title": "Freelist", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{instance=~\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p95", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "inspect": false }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{instance=~\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p99", - "range": true, - "refId": "C", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{instance=~\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 min", - "range": true, - "refId": "D", - "useBackend": false + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "type" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Overflow pages" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "table" + }, + "properties": [ + { + "id": "displayName", + "value": "Table" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 169 + }, + "id": 58, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": ["sum"], + "show": false }, + "showHeader": true + }, + "pluginVersion": "11.5.1", + "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{instance=~\"$instance\", quantile=\"1\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 max", - "range": true, - "refId": "E", - "useBackend": false + "editorMode": "code", + "exemplar": false, + "expr": "sort_desc(reth_db_table_pages{job=\"$job\", type=\"overflow\"} != 0)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" } ], - "title": "Engine API getBlobs Latency", - "type": "timeseries" + "title": "Overflow pages by table", + "type": "table" + }, + { + 
"collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 177 + }, + "id": 203, + "panels": [], + "title": "Static Files", + "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "The size of segments in the static files", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none" + "unit": "bytes" }, "overrides": [] }, "gridPos": { "h": 8, - "w": 12, + "w": 8, "x": 0, - "y": 236 + "y": 178 }, - "id": 1000, + "id": 202, "options": { + "displayLabels": ["name"], "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": ["value"] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "reth_engine_api_blob_metrics_blob_count{instance=~\"$instance\"}", - "legendFormat": "Blobs Found", + "editorMode": "code", + "expr": "reth_static_files_segment_size{job=\"$job\"}", + "interval": "", + "legendFormat": "{{segment}}", "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_engine_api_blob_metrics_blob_misses{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Blobs Missed", - "range": true, - "refId": "B" } ], - "title": "Blob Count and Misses", - "type": "timeseries" + "title": "Segments size", + "type": "piechart" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "Counts the number of failed response deliveries due to client request termination.", "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" + "mode": "thresholds" }, "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "align": "left", + "cellOptions": { + "type": "auto" }, - "thresholdsStyle": { - "mode": "off" - } + "inspect": false }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "none" + } }, 
- "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Entries" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "segment" + }, + "properties": [ + { + "id": "displayName", + "value": "Segment" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + } + ] }, "gridPos": { "h": 8, - "w": 24, - "x": 0, - "y": 236 + "w": 8, + "x": 8, + "y": 178 }, - "id": 249, + "id": 204, "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": ["sum"], + "show": false + }, + "showHeader": true }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "expr": "consensus_engine_beacon_failed_new_payload_response_deliveries{instance=~\"$instance\"}", - "legendFormat": "Failed NewPayload Deliveries", + "editorMode": "code", + "exemplar": false, + "expr": "reth_static_files_segment_entries{job=\"$job\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "expr": "consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{instance=~\"$instance\"}", - "legendFormat": "Failed ForkchoiceUpdated Deliveries", - "refId": "B" } ], - "title": "Failed Engine API Response Deliveries", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 244 - }, - "id": 214, - "panels": [], - "title": "State Root Task", - "type": "row" + "title": "Entries per segment", + "type": "table" }, { "datasource": { @@ -7115,47 +5979,22 @@ "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" + "mode": "thresholds" }, "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "align": "left", + "cellOptions": { + "type": "auto" }, - "thresholdsStyle": { - "mode": "off" - } + "inspect": false }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7164,63 +6003,128 @@ ] } }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + 
"properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Files" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "segment" + }, + "properties": [ + { + "id": "displayName", + "value": "Segment" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + } + ] }, "gridPos": { "h": 8, - "w": 12, - "x": 0, - "y": 245 + "w": 8, + "x": 16, + "y": 178 }, - "id": 216, + "id": 205, "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": ["sum"], + "show": false }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "showHeader": true }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "reth_tree_root_proof_calculation_storage_targets_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "{{type}} storage proof targets p{{quantile}}", - "range": true, - "refId": "Branch Nodes" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proof_calculation_account_targets_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "hide": false, - "instant": false, - "legendFormat": "{{type}} account proof targets p{{quantile}}", - "range": true, - "refId": "Leaf Nodes" + "exemplar": false, + "expr": "reth_static_files_segment_files{job=\"$job\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" } ], - "title": "Proof Targets", - "type": "timeseries" + "title": "Files per segment", + "type": "table" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "The size of the static files over time", "fieldConfig": { "defaults": { "color": { @@ -7264,24 +6168,26 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "bytes" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 245 + "x": 0, + "y": 186 }, - "id": 255, + "id": 206, "options": { "legend": { "calcs": [], @@ -7290,26 +6196,26 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proofs_processed_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "{{quantile}} percentile", + "expr": "sum by (job) ( reth_static_files_segment_size{job=\"$job\"} )", + "legendFormat": "__auto", "range": true, - "refId": "Branch Nodes" + "refId": "A" } ], - "title": "Proofs Processed", + 
"title": "Static Files growth", "type": "timeseries" }, { @@ -7317,6 +6223,7 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "The maximum time the static files operation which commits a writer took.", "fieldConfig": { "defaults": { "color": { @@ -7330,7 +6237,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -7360,7 +6267,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7375,10 +6283,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 253 + "x": 12, + "y": 186 }, - "id": 254, + "id": 207, "options": { "legend": { "calcs": [], @@ -7387,33 +6295,48 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proof_calculation_duration_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "{{quantile}} percentile", + "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{job=\"$job\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", + "legendFormat": "{{segment}}", "range": true, - "refId": "Branch Nodes" + "refId": "A" } ], - "title": "Proof calculation duration", + "title": "Max writer commit time", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 194 + }, + "id": 79, + "panels": [], + "repeat": "instance", + "title": "Blockchain Tree", + "type": "row" + }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "The block number of the tip of the canonical chain from the blockchain tree.", "fieldConfig": { "defaults": { "color": { @@ -7457,25 +6380,25 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "none" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 253 + "x": 0, + "y": 195 }, - "id": 256, + "id": 74, "options": { "legend": { "calcs": [], @@ -7484,26 +6407,27 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "hideZeros": false, + "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_tree_root_inflight_multiproofs_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "{{quantile}} percentile", + "editorMode": "builder", + "expr": "reth_blockchain_tree_canonical_chain_height{job=\"$job\"}", + "hide": false, + "legendFormat": "Canonical chain height", "range": true, - "refId": "Branch Nodes" + "refId": "B" } ], - "title": "In-flight MultiProof requests", + "title": "Canonical chain height", "type": "timeseries" }, { @@ -7511,6 +6435,7 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "Total number of blocks in the tree's block buffer", "fieldConfig": { "defaults": { "color": { @@ -7554,25 +6479,25 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "none" + } }, 
"overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 261 + "x": 12, + "y": 195 }, - "id": 257, + "id": 80, "options": { "legend": { "calcs": [], @@ -7581,26 +6506,27 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "hideZeros": false, + "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_tree_root_pending_multiproofs_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "{{quantile}} percentile", + "editorMode": "builder", + "expr": "reth_blockchain_tree_block_buffer_blocks{job=\"$job\"}", + "hide": false, + "legendFormat": "Buffered blocks", "range": true, - "refId": "Branch Nodes" + "refId": "B" } ], - "title": "Pending MultiProof requests", + "title": "Block buffer blocks", "type": "timeseries" }, { @@ -7651,7 +6577,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7666,38 +6593,39 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 261 + "x": 0, + "y": 203 }, - "id": 259, + "id": 1002, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + "showLegend": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "increase(reth_blockchain_tree_reorgs{job=\"$job\"}[$__rate_interval])", "instant": false, - "legendFormat": "Account {{quantile}} percentile", + "legendFormat": "__auto", "range": true, - "refId": "Branch Nodes" + "refId": "A" } ], - "title": "Redundant multiproof account nodes", + "title": "Reorgs", "type": "timeseries" }, { @@ -7748,7 +6676,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7763,45 +6692,60 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 269 + "x": 12, + "y": 203 }, - "id": 260, + "id": 190, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + "showLegend": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_blockchain_tree_latest_reorg_depth{job=\"$job\"}", "instant": false, - "legendFormat": "Storage {{quantile}} percentile", + "legendFormat": "__auto", "range": true, - "refId": "Branch Nodes" + "refId": "A" } ], - "title": "Redundant multiproof storage nodes", + "title": "Latest Reorg Depth", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 211 + }, + "id": 108, + "panels": [], + "title": "RPC server", + "type": "row" + }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -7816,7 +6760,7 @@ "barAlignment": 0, "barWidthFactor": 
0.6, "drawStyle": "line", - "fillOpacity": 0, + "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, @@ -7830,7 +6774,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -7845,7 +6789,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7853,17 +6798,42 @@ } ] }, - "unit": "none" + "unit": "short" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 269 + "x": 0, + "y": 212 }, - "id": 261, + "id": 109, "options": { "legend": { "calcs": [], @@ -7872,26 +6842,31 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "hideZeros": false, + "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "instant": false, - "legendFormat": "Storage {{quantile}} percentile", + "expr": "sum(reth_rpc_server_connections_connections_opened_total{job=\"$job\"} - reth_rpc_server_connections_connections_closed_total{job=\"$job\"}) by (transport)", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{transport}}", "range": true, - "refId": "Branch Nodes" + "refId": "A", + "useBackend": false } ], - "title": "Total multiproof storage nodes", + "title": "Active Connections", "type": "timeseries" }, { @@ -7899,105 +6874,96 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "", "fieldConfig": { "defaults": { - "color": { - "mode": "palette-classic" - }, "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, "scaleDistribution": { "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 277 + "x": 12, + "y": 212 }, - "id": 262, + "id": 111, + "maxDataPoints": 25, "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "show": true + }, + 
"rowsFrame": { + "layout": "auto", + "value": "Latency time" }, "tooltip": { "mode": "single", - "sort": "none" + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "hide": false, + "exemplar": false, + "expr": "avg(max_over_time(reth_rpc_server_connections_request_time_seconds{job=\"$job\"}[$__rate_interval]) > 0) by (quantile)", + "format": "time_series", "instant": false, - "legendFormat": "Account {{quantile}} percentile", + "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "Total multiproof account nodes", - "type": "timeseries" + "title": "Request Latency time", + "type": "heatmap" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "How much time is spent in the multiproof task", "fieldConfig": { "defaults": { "color": { @@ -8011,7 +6977,7 @@ "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, - "drawStyle": "line", + "drawStyle": "points", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -8041,7 +7007,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8056,10 +7023,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 277 + "x": 0, + "y": 220 }, - "id": 263, + "id": 120, "options": { "legend": { "calcs": [], @@ -8068,49 +7035,124 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_multiproof_task_total_duration_histogram{instance=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", - "hide": false, + "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{job=\"$job\"}[$__rate_interval])) by (method) > 0", "instant": false, - "legendFormat": "Task duration {{quantile}} percentile", + "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "Proof fetching total duration", + "title": "Maximum call latency per method", "type": "timeseries" }, { - "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 285 + "h": 8, + "w": 12, + "x": 12, + "y": 220 }, - "id": 68, - "panels": [], - "repeat": "instance", - "title": "Payload Builder", - "type": "row" + "id": 112, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Latency time" + }, + 
"tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(max_over_time(reth_rpc_server_calls_time_seconds{job=\"$job\"}[$__rate_interval]) > 0) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Call Latency time", + "type": "heatmap" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "Number of active jobs", "fieldConfig": { "defaults": { "color": { @@ -8134,7 +7176,7 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 3, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -8154,7 +7196,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8163,15 +7206,52 @@ ] } }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/.*cached items.*/" + }, + "properties": [ + { + "id": "custom.axisLabel", + "value": "Items" + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": "/.*consumers.*/" + }, + "properties": [ + { + "id": "custom.axisLabel", + "value": "Queued consumers" + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": "/.memory usage*/" + }, + "properties": [ + { + "id": "unit", + "value": "decbytes" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 286 + "y": 228 }, - "id": 60, + "id": 198, "options": { "legend": { "calcs": [], @@ -8179,34 +7259,158 @@ "placement": "bottom", "showLegend": true }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"headers\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Headers cache cached items", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache queued consumers", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", cache=\"headers\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Headers cache queued consumers", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", 
cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Block cache queued consumers", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_memory_usage{job=\"$job\", cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Blocks cache memory usage", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache cached items", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_memory_usage{job=\"$job\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache memory usage", + "range": true, + "refId": "G", + "useBackend": false + }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_payloads_active_jobs{instance=~\"$instance\"}", - "legendFormat": "Active Jobs", + "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Block cache cached items", "range": true, - "refId": "A" + "refId": "H", + "useBackend": false } ], - "title": "Active Jobs", + "title": "RPC Cache Metrics", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Total number of initiated jobs", "fieldConfig": { "defaults": { "color": { @@ -8229,8 +7433,8 @@ "viz": false }, "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 3, + "lineInterpolation": "smooth", + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -8250,14 +7454,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "reqps" }, "overrides": [] }, @@ -8265,9 +7471,9 @@ "h": 8, "w": 12, "x": 12, - "y": 286 + "y": 228 }, - "id": 62, + "id": 246, "options": { "legend": { "calcs": [], @@ -8276,33 +7482,48 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "builder", - "expr": "reth_payloads_initiated_jobs{instance=~\"$instance\"}", - "legendFormat": "Initiated Jobs", + "editorMode": "code", + "expr": "sum(rate(reth_rpc_server_calls_successful_total{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", + "instant": false, + "legendFormat": "{{method}}", "range": true, "refId": "A" } ], - "title": "Initiated Jobs", + "title": "RPC Throughput", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + 
"w": 24, + "x": 0, + "y": 236 + }, + "id": 24, + "panels": [], + "repeat": "instance", + "title": "Downloader: Headers", + "type": "row" + }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Total number of failed jobs", "fieldConfig": { "defaults": { "color": { @@ -8326,7 +7547,7 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 3, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -8346,7 +7567,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8355,15 +7577,40 @@ ] } }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "D" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 294 + "y": 237 }, - "id": 64, + "id": 26, "options": { "legend": { "calcs": [], @@ -8372,11 +7619,12 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "hideZeros": false, + "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { @@ -8384,33 +7632,58 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_payloads_failed_jobs{instance=~\"$instance\"}", - "legendFormat": "Failed Jobs", + "expr": "reth_downloaders_headers_total_downloaded{job=\"$job\"}", + "legendFormat": "Downloaded", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_headers_total_flushed{job=\"$job\"}", + "hide": false, + "legendFormat": "Flushed", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_total_downloaded{job=\"$job\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Downloaded/s", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_total_flushed{job=\"$job\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Flushed/s", + "range": true, + "refId": "D" } ], - "title": "Failed Jobs", + "title": "I/O", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 302 - }, - "id": 97, - "panels": [], - "title": "Process", - "type": "row" - }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, + "description": "Internal errors in the header downloader. 
These are expected to happen from time to time.", "fieldConfig": { "defaults": { "color": { @@ -8454,7 +7727,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8462,30 +7736,17 @@ } ] }, - "unit": "decbytes" + "unit": "cps" }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Retained" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 303 + "x": 12, + "y": 237 }, - "id": 98, + "id": 33, "options": { "legend": { "calcs": [], @@ -8494,21 +7755,21 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_active{instance=~\"$instance\"}", - "instant": false, - "legendFormat": "Active", + "expr": "rate(reth_downloaders_headers_timeout_errors{job=\"$job\"}[$__rate_interval])", + "legendFormat": "Request timed out", "range": true, "refId": "A" }, @@ -8518,67 +7779,26 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_jemalloc_allocated{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_headers_unexpected_errors{job=\"$job\"}[$__rate_interval])", "hide": false, - "instant": false, - "legendFormat": "Allocated", + "legendFormat": "Unexpected error", "range": true, "refId": "B" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_mapped{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_headers_validation_errors{job=\"$job\"}[$__rate_interval])", "hide": false, - "instant": false, - "legendFormat": "Mapped", + "legendFormat": "Invalid response", "range": true, "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_jemalloc_metadata{instance=~\"$instance\"}", - "hide": false, - "instant": false, - "legendFormat": "Metadata", - "range": true, - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_jemalloc_resident{instance=~\"$instance\"}", - "hide": false, - "instant": false, - "legendFormat": "Resident", - "range": true, - "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "reth_jemalloc_retained{instance=~\"$instance\"}", - "hide": false, - "instant": false, - "legendFormat": "Retained", - "range": true, - "refId": "F" } ], - "title": "Jemalloc Memory", + "title": "Errors", "type": "timeseries" }, { @@ -8586,7 +7806,7 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "", + "description": "The number of connected peers and in-progress requests for headers.", "fieldConfig": { "defaults": { "color": { @@ -8630,25 +7850,25 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "decbytes" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 303 + "x": 0, + "y": 245 }, - "id": 101, + "id": 36, "options": { "legend": { "calcs": [], @@ -8657,34 +7877,60 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "hideZeros": false, + "mode": "multi", "sort": "none" 
}
  },
-  "pluginVersion": "11.4.0",
+  "pluginVersion": "11.5.1",
  "targets": [
    {
      "datasource": {
        "type": "prometheus",
-        "uid": "${datasource}"
+        "uid": "${DS_PROMETHEUS}"
      },
-      "editorMode": "code",
-      "expr": "reth_process_resident_memory_bytes{instance=~\"$instance\"}",
-      "instant": false,
-      "legendFormat": "Resident",
+      "editorMode": "builder",
+      "expr": "reth_downloaders_headers_in_flight_requests{job=\"$job\"}",
+      "legendFormat": "In flight requests",
      "range": true,
      "refId": "A"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${datasource}"
+      },
+      "editorMode": "builder",
+      "expr": "reth_network_connected_peers{job=\"$job\"}",
+      "hide": false,
+      "legendFormat": "Connected peers",
+      "range": true,
+      "refId": "B"
    }
  ],
-  "title": "Memory",
+  "title": "Requests",
  "type": "timeseries"
},
+{
+  "collapsed": false,
+  "gridPos": {
+    "h": 1,
+    "w": 24,
+    "x": 0,
+    "y": 253
+  },
+  "id": 32,
+  "panels": [],
+  "repeat": "instance",
+  "title": "Downloader: Bodies",
+  "type": "row"
+},
{
  "datasource": {
    "type": "prometheus",
-    "uid": "${datasource}"
+    "uid": "${DS_PROMETHEUS}"
  },
-  "description": "100% = 1 core",
+  "description": "The internal state of the bodies downloader: the number of downloaded block bodies, and the number of bodies sent to the bodies stage.",
  "fieldConfig": {
    "defaults": {
      "color": {
@@ -8697,7 +7943,6 @@
      "axisLabel": "",
      "axisPlacement": "auto",
      "barAlignment": 0,
-      "barWidthFactor": 0.6,
      "drawStyle": "line",
      "fillOpacity": 0,
      "gradientMode": "none",
@@ -8736,17 +7981,50 @@
        }
      ]
    },
-    "unit": "percentunit"
+    "unit": "locale"
  },
-  "overrides": []
+  "overrides": [
+    {
+      "matcher": {
+        "id": "byFrameRefID",
+        "options": "C"
+      },
+      "properties": [
+        {
+          "id": "custom.axisPlacement",
+          "value": "right"
+        },
+        {
+          "id": "unit",
+          "value": "ops"
+        }
+      ]
+    },
+    {
+      "matcher": {
+        "id": "byFrameRefID",
+        "options": "D"
+      },
+      "properties": [
+        {
+          "id": "custom.axisPlacement",
+          "value": "right"
+        },
+        {
+          "id": "unit",
+          "value": "ops"
+        }
+      ]
+    }
+  ]
},
"gridPos": {
  "h": 8,
  "w": 12,
  "x": 0,
-  "y": 311
+  "y": 254
},
-"id": 99,
+"id": 30,
"options": {
  "legend": {
    "calcs": [],
@@ -8755,7 +8033,7 @@
    "showLegend": true
  },
  "tooltip": {
-    "mode": "single",
+    "mode": "multi",
    "sort": "none"
  }
},
@@ -8767,22 +8045,93 @@
    "uid": "${datasource}"
  },
  "editorMode": "builder",
-  "expr": "avg(rate(reth_process_cpu_seconds_total{instance=~\"$instance\"}[1m]))",
-  "instant": false,
-  "legendFormat": "Process",
+  "expr": "reth_downloaders_bodies_total_downloaded{job=\"$job\"}",
+  "legendFormat": "Downloaded",
  "range": true,
  "refId": "A"
+},
+{
+  "datasource": {
+    "type": "prometheus",
+    "uid": "${DS_PROMETHEUS}"
+  },
+  "editorMode": "builder",
+  "expr": "reth_downloaders_bodies_total_flushed{job=\"$job\"}",
+  "hide": false,
+  "legendFormat": "Flushed",
+  "range": true,
+  "refId": "B"
+},
+{
+  "datasource": {
+    "type": "prometheus",
+    "uid": "${datasource}"
+  },
+  "editorMode": "builder",
+  "expr": "rate(reth_downloaders_bodies_total_flushed{job=\"$job\"}[$__rate_interval])",
+  "hide": false,
+  "legendFormat": "Flushed/s",
+  "range": true,
+  "refId": "C"
+},
+{
+  "datasource": {
+    "type": "prometheus",
+    "uid": "${DS_PROMETHEUS}"
+  },
+  "editorMode": "builder",
+  "expr": "rate(reth_downloaders_bodies_total_downloaded{job=\"$job\"}[$__rate_interval])",
+  "hide": false,
+  "legendFormat": "Downloaded/s",
+  "range": true,
+  "refId": "D"
+},
+{
+  "datasource": {
+    "type": "prometheus",
+    "uid": "${datasource}"
+  },
+  "editorMode": "builder",
+  "expr": 
"reth_downloaders_bodies_buffered_responses{job=\"$job\"}", + "hide": false, + "legendFormat": "Buffered responses", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks{job=\"$job\"}", + "hide": false, + "legendFormat": "Buffered blocks", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_queued_blocks{job=\"$job\"}", + "hide": false, + "legendFormat": "Queued blocks", + "range": true, + "refId": "G" } ], - "title": "CPU", + "title": "I/O", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "", + "description": "Internal errors in the bodies downloader. These are expected to happen from time to time.", "fieldConfig": { "defaults": { "color": { @@ -8795,7 +8144,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8822,19 +8170,16 @@ } }, "mappings": [], + "min": 0, "thresholds": { "mode": "absolute", "steps": [ { "color": "green" - }, - { - "color": "red", - "value": 80 } ] }, - "unit": "none" + "unit": "cps" }, "overrides": [] }, @@ -8842,9 +8187,9 @@ "h": 8, "w": 12, "x": 12, - "y": 311 + "y": 254 }, - "id": 100, + "id": 28, "options": { "legend": { "calcs": [], @@ -8865,34 +8210,45 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_process_open_fds{instance=~\"$instance\"}", - "instant": false, - "legendFormat": "Open", + "expr": "rate(reth_downloaders_bodies_timeout_errors{job=\"$job\"}[$__rate_interval])", + "legendFormat": "Request timed out", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_unexpected_errors{job=\"$job\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Unexpected error", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_validation_errors{job=\"$job\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Invalid response", + "range": true, + "refId": "C" } ], - "title": "File Descriptors", + "title": "Errors", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 319 - }, - "id": 105, - "panels": [], - "title": "Pruning", - "type": "row" - }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { "defaults": { "color": { @@ -8905,7 +8261,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8922,7 +8277,7 @@ "type": "linear" }, "showPoints": "auto", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -8932,7 +8287,6 @@ } }, "mappings": [], - "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -8944,8 +8298,7 @@ "value": 80 } ] - }, - "unit": "s" + } }, "overrides": [] }, @@ -8953,18 +8306,18 @@ "h": 8, "w": 12, "x": 0, - "y": 320 + "y": 262 }, - "id": 106, + "id": 35, "options": { "legend": { 
"calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, @@ -8975,15 +8328,26 @@ "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "rate(reth_pruner_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])", - "instant": false, - "legendFormat": "__auto", + "editorMode": "builder", + "expr": "reth_downloaders_bodies_in_flight_requests{job=\"$job\"}", + "legendFormat": "In flight requests", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_connected_peers{job=\"$job\"}", + "hide": false, + "legendFormat": "Connected peers", + "range": true, + "refId": "B" } ], - "title": "Pruner duration, total", + "title": "Requests", "type": "timeseries" }, { @@ -8991,6 +8355,7 @@ "type": "prometheus", "uid": "${datasource}" }, + "description": "The number of blocks and size in bytes of those blocks", "fieldConfig": { "defaults": { "color": { @@ -9003,7 +8368,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9020,7 +8384,7 @@ "type": "linear" }, "showPoints": "auto", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -9030,7 +8394,6 @@ } }, "mappings": [], - "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -9042,18 +8405,35 @@ "value": 80 } ] - }, - "unit": "s" - }, - "overrides": [] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "blocks" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 320 + "y": 262 }, - "id": 107, + "id": 73, "options": { "legend": { "calcs": [], @@ -9062,7 +8442,7 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, @@ -9071,24 +8451,37 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "rate(reth_pruner_segments_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])", - "instant": false, - "legendFormat": "{{segment}}", + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{job=\"$job\"}", + "hide": false, + "legendFormat": "Buffered blocks size ", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks{job=\"$job\"}", + "hide": false, + "legendFormat": "Buffered blocks", + "range": true, + "refId": "B" } ], - "title": "Pruner duration, per segment", + "title": "Downloader buffer", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { "defaults": { "color": { @@ -9101,7 +8494,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", 
@@ -9118,7 +8510,7 @@ "type": "linear" }, "showPoints": "auto", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -9140,17 +8532,34 @@ } ] }, - "unit": "none" + "unit": "bytes" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "blocks" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 328 + "y": 270 }, - "id": 217, + "id": 102, "options": { "legend": { "calcs": [], @@ -9159,7 +8568,7 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, @@ -9171,14 +8580,39 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_pruner_segments_highest_pruned_block{instance=~\"$instance\"}", - "instant": false, - "legendFormat": "{{segment}}", + "expr": "reth_downloaders_bodies_response_response_size_bytes{job=\"$job\"}", + "hide": false, + "legendFormat": "Response size", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_length{job=\"$job\"}", + "hide": false, + "legendFormat": "Individual response length (number of bodies in response)", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Mean body size in response", + "range": true, + "refId": "C" } ], - "title": "Highest pruned block, per segment", + "title": "Block body response sizes", "type": "timeseries" }, { @@ -9187,17 +8621,17 @@ "h": 1, "w": 24, "x": 0, - "y": 336 + "y": 278 }, - "id": 108, + "id": 226, "panels": [], - "title": "RPC server", + "title": "Eth Requests", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -9212,9 +8646,8 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -9284,9 +8717,9 @@ "h": 8, "w": 12, "x": 0, - "y": 337 + "y": 279 }, - "id": 109, + "id": 225, "options": { "legend": { "calcs": [], @@ -9295,6 +8728,7 @@ "showLegend": true }, "tooltip": { + "maxHeight": 600, "mode": "multi", "sort": "none" } @@ -9307,115 +8741,26 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "code", - "expr": "sum(reth_rpc_server_connections_connections_opened_total{instance=~\"$instance\"} - reth_rpc_server_connections_connections_closed_total{instance=~\"$instance\"}) by (transport)", + "editorMode": "builder", + "expr": "rate(reth_network_eth_headers_requests_received_total{job=\"$job\"}[$__rate_interval])", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, - "legendFormat": "{{transport}}", + "legendFormat": "Headers Requests/s", "range": true, "refId": "A", "useBackend": false } ], - "title": "Active Connections", + "title": "Headers Requests Received", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "description": "", - "fieldConfig": { - "defaults": { - "custom": { - "hideFrom": { - 
"legend": false, - "tooltip": false, - "viz": false - }, - "scaleDistribution": { - "type": "linear" - } - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 337 - }, - "id": 111, - "maxDataPoints": 25, - "options": { - "calculate": false, - "cellGap": 1, - "cellValues": { - "unit": "s" - }, - "color": { - "exponent": 0.2, - "fill": "dark-orange", - "min": 0, - "mode": "opacity", - "reverse": false, - "scale": "exponential", - "scheme": "Oranges", - "steps": 128 - }, - "exemplars": { - "color": "rgba(255,0,255,0.7)" - }, - "filterValues": { - "le": 1e-9 - }, - "legend": { - "show": true - }, - "rowsFrame": { - "layout": "auto", - "value": "Latency time" - }, - "tooltip": { - "mode": "single", - "showColorScale": false, - "yHistogram": false - }, - "yAxis": { - "axisLabel": "Quantile", - "axisPlacement": "left", - "reverse": false, - "unit": "percentunit" - } - }, - "pluginVersion": "11.2.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "avg(max_over_time(reth_rpc_server_connections_request_time_seconds{instance=~\"$instance\"}[$__rate_interval]) > 0) by (quantile)", - "format": "time_series", - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A" - } - ], - "title": "Request Latency time", - "type": "heatmap" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, "fieldConfig": { "defaults": { "color": { @@ -9428,8 +8773,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "points", + "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { @@ -9444,7 +8788,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -9461,148 +8805,89 @@ { "color": "green" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 345 - }, - "id": 120, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{instance=~\"$instance\"}[$__rate_interval])) by (method) > 0", - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A" - } - ], - "title": "Maximum call latency per method", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" }, - "scaleDistribution": { - "type": "linear" - } + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] } - }, - "overrides": [] + ] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 345 + "y": 279 }, - "id": 112, - "maxDataPoints": 25, + "id": 227, "options": { - "calculate": false, - "cellGap": 1, - "cellValues": { - 
"unit": "s" - }, - "color": { - "exponent": 0.2, - "fill": "dark-orange", - "min": 0, - "mode": "opacity", - "reverse": false, - "scale": "exponential", - "scheme": "Oranges", - "steps": 128 - }, - "exemplars": { - "color": "rgba(255,0,255,0.7)" - }, - "filterValues": { - "le": 1e-9 - }, "legend": { - "show": true - }, - "rowsFrame": { - "layout": "auto", - "value": "Latency time" + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single", - "showColorScale": false, - "yHistogram": false - }, - "yAxis": { - "axisLabel": "Quantile", - "axisPlacement": "left", - "reverse": false, - "unit": "percentunit" + "maxHeight": 600, + "mode": "multi", + "sort": "none" } }, - "pluginVersion": "11.2.0", + "pluginVersion": "11.4.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "avg(max_over_time(reth_rpc_server_calls_time_seconds{instance=~\"$instance\"}[$__rate_interval]) > 0) by (quantile)", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_receipts_requests_received_total{job=\"$job\"}[$__rate_interval])", "format": "time_series", - "instant": false, - "legendFormat": "{{quantile}}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Receipts Requests/s", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], - "title": "Call Latency time", - "type": "heatmap" + "title": "Receipts Requests Received", + "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -9615,7 +8900,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9631,7 +8915,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -9653,42 +8937,31 @@ "value": 80 } ] - } + }, + "unit": "short" }, "overrides": [ { "matcher": { - "id": "byRegexp", - "options": "/.*cached items.*/" - }, - "properties": [ - { - "id": "custom.axisLabel", - "value": "Items" - } - ] - }, - { - "matcher": { - "id": "byRegexp", - "options": "/.*consumers.*/" + "id": "byName", + "options": "http" }, "properties": [ { - "id": "custom.axisLabel", - "value": "Queued consumers" + "id": "displayName", + "value": "HTTP" } ] }, { "matcher": { - "id": "byRegexp", - "options": "/.memory usage*/" + "id": "byName", + "options": "ws" }, "properties": [ { - "id": "unit", - "value": "decbytes" + "id": "displayName", + "value": "WebSocket" } ] } @@ -9698,9 +8971,9 @@ "h": 8, "w": 12, "x": 0, - "y": 353 + "y": 287 }, - "id": 198, + "id": 235, "options": { "legend": { "calcs": [], @@ -9709,7 +8982,8 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "maxHeight": 600, + "mode": "multi", "sort": "none" } }, @@ -9722,143 +8996,25 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{instance=\"$instance\", cache=\"headers\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Headers cache cached items", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_rpc_eth_cache_queued_consumers_count{instance=\"$instance\", cache=\"receipts\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Receipts cache queued consumers", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_queued_consumers_count{instance=\"$instance\", cache=\"headers\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Headers cache queued consumers", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_queued_consumers_count{instance=\"$instance\", cache=\"blocks\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Block cache queued consumers", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_memory_usage{instance=\"$instance\", cache=\"blocks\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Blocks cache memory usage", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{instance=\"$instance\", cache=\"receipts\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Receipts cache cached items", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_memory_usage{instance=\"$instance\", cache=\"receipts\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Receipts cache memory usage", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{instance=\"$instance\", cache=\"blocks\"}", + "expr": "rate(reth_network_eth_bodies_requests_received_total{job=\"$job\"}[$__rate_interval])", + "format": "time_series", "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Block cache cached items", + "includeNullMetadata": true, + "legendFormat": "Bodies Requests/s", "range": true, - "refId": "H", + "refId": "A", "useBackend": false } ], - "title": "RPC Cache Metrics", + "title": "Bodies Requests Received", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -9871,7 +9027,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9881,13 +9036,13 @@ "viz": false }, "insertNulls": false, - 
"lineInterpolation": "smooth", + "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -9910,17 +9065,42 @@ } ] }, - "unit": "reqps" + "unit": "short" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 353 + "y": 287 }, - "id": 246, + "id": 234, "options": { "legend": { "calcs": [], @@ -9929,7 +9109,8 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "maxHeight": 600, + "mode": "multi", "sort": "none" } }, @@ -9940,15 +9121,19 @@ "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "sum(rate(reth_rpc_server_calls_successful_total{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", - "instant": false, - "legendFormat": "{{method}}", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_node_data_requests_received_total{job=\"$job\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Node Data Requests/s", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], - "title": "RPC Throughput", + "title": "Node Data Requests Received", "type": "timeseries" }, { @@ -9957,19 +9142,20 @@ "h": 1, "w": 24, "x": 0, - "y": 361 + "y": 295 }, - "id": 236, + "id": 68, "panels": [], - "title": "Execution Extensions", + "repeat": "instance", + "title": "Payload Builder", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "The total number of canonical state notifications sent to ExExes.", + "description": "Number of active jobs", "fieldConfig": { "defaults": { "color": { @@ -9982,7 +9168,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9993,7 +9178,7 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 1, + "lineWidth": 3, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -10028,9 +9213,9 @@ "h": 8, "w": 12, "x": 0, - "y": 362 + "y": 296 }, - "id": 237, + "id": 60, "options": { "legend": { "calcs": [], @@ -10039,7 +9224,7 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10051,22 +9236,21 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_notifications_sent_total{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Total Notifications Sent", + "expr": "reth_payloads_active_jobs{job=\"$job\"}", + "legendFormat": "Active Jobs", "range": true, - "refId": "B" + "refId": "A" } ], - "title": "Total Notifications Sent", + "title": "Active Jobs", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "The total number of events ExExes have sent to the manager.", + "description": "Total number of initiated jobs", "fieldConfig": { "defaults": { "color": { @@ -10079,7 +9263,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, 
"gradientMode": "none", @@ -10090,7 +9273,7 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 1, + "lineWidth": 3, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -10125,9 +9308,9 @@ "h": 8, "w": 12, "x": 12, - "y": 362 + "y": 296 }, - "id": 238, + "id": 62, "options": { "legend": { "calcs": [], @@ -10136,7 +9319,7 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10148,22 +9331,21 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_events_sent_total{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Total Events Sent", + "expr": "reth_payloads_initiated_jobs{job=\"$job\"}", + "legendFormat": "Initiated Jobs", "range": true, - "refId": "B" + "refId": "A" } ], - "title": "Total Events Sent", + "title": "Initiated Jobs", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Current and Maximum capacity of the internal state notifications buffer.", + "description": "Total number of failed jobs", "fieldConfig": { "defaults": { "color": { @@ -10176,7 +9358,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10187,7 +9368,7 @@ }, "insertNulls": false, "lineInterpolation": "linear", - "lineWidth": 1, + "lineWidth": 3, "pointSize": 5, "scaleDistribution": { "type": "linear" @@ -10222,9 +9403,9 @@ "h": 8, "w": 12, "x": 0, - "y": 370 + "y": 304 }, - "id": 239, + "id": 64, "options": { "legend": { "calcs": [], @@ -10233,7 +9414,7 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10245,34 +9426,33 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_manager_current_capacity{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Current size", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "builder", - "expr": "max_over_time(reth_exex_manager_max_capacity{instance=~\"$instance\"}[1h])", - "hide": false, - "legendFormat": "Max size", + "expr": "reth_payloads_failed_jobs{job=\"$job\"}", + "legendFormat": "Failed Jobs", "range": true, - "refId": "C" + "refId": "A" } ], - "title": "Current and Max Capacity", + "title": "Failed Jobs", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 312 + }, + "id": 105, + "panels": [], + "title": "Pruning", + "type": "row" + }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Current size of the internal state notifications buffer.", "fieldConfig": { "defaults": { "color": { @@ -10285,7 +9465,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10302,7 +9481,7 @@ "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", "mode": "none" @@ -10312,6 +9491,7 @@ } }, "mappings": [], + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -10323,26 +9503,27 @@ "value": 80 } ] - } + }, + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 370 + "x": 0, + "y": 313 }, - "id": 219, + "id": 106, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + 
"showLegend": false }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10353,29 +9534,61 @@ "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "builder", - "expr": "reth_exex_manager_buffer_size{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Max size", + "editorMode": "code", + "expr": "rate(reth_pruner_duration_seconds_sum{job=\"$job\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{job=\"$job\"}[$__rate_interval])", + "instant": false, + "legendFormat": "__auto", "range": true, - "refId": "B" + "refId": "A" } ], - "title": "Buffer Size", + "title": "Pruner duration, total", "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "Total number of ExExes installed in the node", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, "mappings": [], + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -10388,71 +9601,52 @@ } ] }, - "unit": "none" + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 378 + "x": 12, + "y": 313 }, - "id": 220, + "id": 107, "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "pluginVersion": "11.2.0", + "pluginVersion": "11.4.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "builder", - "expr": "reth_exex_manager_num_exexs{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Number of ExExs", + "editorMode": "code", + "expr": "rate(reth_pruner_segments_duration_seconds_sum{job=\"$job\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{job=\"$job\"}[$__rate_interval])", + "instant": false, + "legendFormat": "{{segment}}", "range": true, "refId": "A" } ], - "title": "Number of ExExes", - "type": "stat" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 386 - }, - "id": 241, - "panels": [], - "title": "Execution Extensions Write-Ahead Log", - "type": "row" + "title": "Pruner duration, per segment", + "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -10465,7 +9659,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10482,7 +9675,7 @@ "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": 
true, "stacking": { "group": "A", "mode": "none" @@ -10491,7 +9684,6 @@ "mode": "off" } }, - "fieldMinMax": false, "mappings": [], "thresholds": { "mode": "absolute", @@ -10504,7 +9696,8 @@ "value": 80 } ] - } + }, + "unit": "none" }, "overrides": [] }, @@ -10512,9 +9705,9 @@ "h": 8, "w": 12, "x": 0, - "y": 387 + "y": 321 }, - "id": 243, + "id": 217, "options": { "legend": { "calcs": [], @@ -10523,7 +9716,7 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10535,36 +9728,34 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_exex_wal_lowest_committed_block_height{instance=~\"$instance\"}", - "hide": false, - "instant": false, - "legendFormat": "Lowest Block", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "reth_exex_wal_highest_committed_block_height{instance=~\"$instance\"}", - "hide": false, + "expr": "reth_pruner_segments_highest_pruned_block{job=\"$job\"}", "instant": false, - "legendFormat": "Highest Block", + "legendFormat": "{{segment}}", "range": true, - "refId": "C" + "refId": "A" } ], - "title": "Current Committed Block Heights", + "title": "Highest pruned block, per segment", "type": "timeseries" }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 329 + }, + "id": 97, + "panels": [], + "title": "Process", + "type": "row" + }, { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -10577,7 +9768,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10603,7 +9793,6 @@ "mode": "off" } }, - "fieldMinMax": false, "mappings": [], "thresholds": { "mode": "absolute", @@ -10616,17 +9805,31 @@ "value": 80 } ] - } + }, + "unit": "decbytes" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Retained" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 387 + "x": 0, + "y": 330 }, - "id": 244, + "id": 98, "options": { "legend": { "calcs": [], @@ -10635,7 +9838,7 @@ "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10646,29 +9849,80 @@ "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "reth_exex_wal_committed_blocks_count{instance=~\"$instance\"}", + "editorMode": "builder", + "expr": "reth_jemalloc_active{job=\"$job\"}", + "instant": false, + "legendFormat": "Active", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_allocated{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Allocated", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_mapped{job=\"$job\"}", "hide": false, "instant": false, - "legendFormat": "Committed Blocks", + "legendFormat": "Mapped", "range": true, "refId": "C" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_metadata{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Metadata", + "range": true, + "refId": "D" 
+ }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "editorMode": "code", - "expr": "reth_exex_wal_notifications_count{instance=~\"$instance\"}", + "editorMode": "builder", + "expr": "reth_jemalloc_resident{job=\"$job\"}", "hide": false, "instant": false, - "legendFormat": "Notifications", + "legendFormat": "Resident", "range": true, - "refId": "B" + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_retained{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Retained", + "range": true, + "refId": "F" } ], - "title": "Number of entities", + "title": "Jemalloc Memory", "type": "timeseries" }, { @@ -10689,7 +9943,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10715,7 +9968,6 @@ "mode": "off" } }, - "fieldMinMax": false, "mappings": [], "thresholds": { "mode": "absolute", @@ -10729,26 +9981,26 @@ } ] }, - "unit": "bytes" + "unit": "decbytes" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 395 + "x": 12, + "y": 330 }, - "id": 245, + "id": 101, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10757,39 +10009,25 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_exex_wal_size_bytes{instance=~\"$instance\"}", - "hide": false, + "expr": "reth_process_resident_memory_bytes{job=\"$job\"}", "instant": false, - "legendFormat": "__auto", + "legendFormat": "Resident", "range": true, - "refId": "C" + "refId": "A" } ], - "title": "Total size of all notifications", + "title": "Memory", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 403 - }, - "id": 226, - "panels": [], - "title": "Eth Requests", - "type": "row" - }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, - "description": "", + "description": "100% = 1 core", "fieldConfig": { "defaults": { "color": { @@ -10802,7 +10040,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10818,7 +10055,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -10841,42 +10078,17 @@ } ] }, - "unit": "short" + "unit": "percentunit" }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "http" - }, - "properties": [ - { - "id": "displayName", - "value": "HTTP" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "ws" - }, - "properties": [ - { - "id": "displayName", - "value": "WebSocket" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 404 + "y": 338 }, - "id": 225, + "id": 99, "options": { "legend": { "calcs": [], @@ -10885,8 +10097,7 @@ "showLegend": true }, "tooltip": { - "maxHeight": 600, - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -10895,21 +10106,17 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_headers_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", - 
"format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Headers Requests/s", + "expr": "avg(rate(reth_process_cpu_seconds_total{job=\"$job\"}[1m]))", + "instant": false, + "legendFormat": "Process", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" } ], - "title": "Headers Requests Received", + "title": "CPU", "type": "timeseries" }, { @@ -10930,7 +10137,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10946,7 +10152,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -10969,42 +10175,17 @@ } ] }, - "unit": "short" + "unit": "none" }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "http" - }, - "properties": [ - { - "id": "displayName", - "value": "HTTP" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "ws" - }, - "properties": [ - { - "id": "displayName", - "value": "WebSocket" - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 404 + "y": 338 }, - "id": 227, + "id": 100, "options": { "legend": { "calcs": [], @@ -11013,8 +10194,7 @@ "showLegend": true }, "tooltip": { - "maxHeight": 600, - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -11023,21 +10203,17 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_receipts_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Receipts Requests/s", + "expr": "reth_process_open_fds{job=\"$job\"}", + "instant": false, + "legendFormat": "Open", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" } ], - "title": "Receipts Requests Received", + "title": "File Descriptors", "type": "timeseries" }, { @@ -11045,7 +10221,7 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "", + "description": "Tracks the number of critical tasks currently ran by the executor.", "fieldConfig": { "defaults": { "color": { @@ -11058,7 +10234,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -11074,7 +10249,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -11092,47 +10267,22 @@ "color": "green" }, { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "http" - }, - "properties": [ - { - "id": "displayName", - "value": "HTTP" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "ws" - }, - "properties": [ - { - "id": "displayName", - "value": "WebSocket" + "color": "semi-dark-red", + "value": 0 } ] - } - ] + }, + "unit": "tasks" + }, + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 412 + "y": 346 }, - "id": 235, + "id": 248, "options": { "legend": { "calcs": [], @@ -11141,31 +10291,28 @@ "showLegend": true }, "tooltip": { - "maxHeight": 600, - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.3", "targets": [ { "datasource": { "type": "prometheus", - "uid": 
"${datasource}" + "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_eth_bodies_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Bodies Requests/s", + "editorMode": "code", + "expr": "reth_executor_spawn_critical_tasks_total{job=\"$job\"}- reth_executor_spawn_finished_critical_tasks_total{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", "range": true, - "refId": "A", - "useBackend": false + "refId": "C" } ], - "title": "Bodies Requests Received", + "title": "Task Executor critical tasks", "type": "timeseries" }, { @@ -11173,7 +10320,7 @@ "type": "prometheus", "uid": "${datasource}" }, - "description": "", + "description": "Tracks the number of regular tasks currently ran by the executor.", "fieldConfig": { "defaults": { "color": { @@ -11186,7 +10333,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -11202,7 +10348,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -11220,35 +10366,23 @@ "color": "green" }, { - "color": "red", + "color": "semi-dark-red", "value": 80 } ] }, - "unit": "short" + "unit": "tasks/s" }, "overrides": [ { "matcher": { - "id": "byName", - "options": "http" - }, - "properties": [ - { - "id": "displayName", - "value": "HTTP" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "ws" + "id": "byFrameRefID", + "options": "C" }, "properties": [ { - "id": "displayName", - "value": "WebSocket" + "id": "unit", + "value": "tasks" } ] } @@ -11258,9 +10392,9 @@ "h": 8, "w": 12, "x": 12, - "y": 412 + "y": 346 }, - "id": 234, + "id": 247, "options": { "legend": { "calcs": [], @@ -11269,32 +10403,860 @@ "showLegend": true }, "tooltip": { - "maxHeight": 600, - "mode": "multi", + "hideZeros": false, + "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "11.5.3", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "${DS_PROMETHEUS}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_network_eth_node_data_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", + "editorMode": "code", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_tasks_total{job=\"$job\"}[$__rate_interval])", "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Node Data Requests/s", + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", "range": true, "refId": "A", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_tasks_total{job=\"$job\"} - reth_executor_spawn_finished_regular_tasks_total{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" } ], - "title": "Node Data Requests Received", + "title": "Task Executor regular tasks", "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 354 + }, + "id": 236, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of 
canonical state notifications sent to ExExes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 370 + }, + "id": 237, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_exex_notifications_sent_total{job=\"$job\"}", + "hide": false, + "legendFormat": "Total Notifications Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Notifications Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of events ExExes have sent to the manager.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 370 + }, + "id": 238, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_exex_events_sent_total{job=\"$job\"}", + "hide": false, + "legendFormat": "Total Events Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Events Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current and Maximum capacity of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": 
"auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 378 + }, + "id": 239, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_current_capacity{job=\"$job\"}", + "hide": false, + "legendFormat": "Current size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "max_over_time(reth_exex_manager_max_capacity{job=\"$job\"}[1h])", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "C" + } + ], + "title": "Current and Max Capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Current size of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 378 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_buffer_size{job=\"$job\"}", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Total number of ExExes installed in the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, 
+ "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 386 + }, + "id": 220, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_num_exexs{job=\"$job\"}", + "hide": false, + "legendFormat": "Number of ExExs", + "range": true, + "refId": "A" + } + ], + "title": "Number of ExExes", + "type": "stat" + } + ], + "title": "Execution Extensions", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 355 + }, + "id": 241, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 371 + }, + "id": 243, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_lowest_committed_block_height{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Lowest Block", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_highest_committed_block_height{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Highest Block", + "range": true, + "refId": "C" + } + ], + "title": "Current Committed Block Heights", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 371 + }, + "id": 244, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_committed_blocks_count{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Committed Blocks", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_notifications_count{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "Notifications", + "range": true, + "refId": "B" + } + ], + "title": "Number of entities", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 379 + }, + "id": 245, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_size_bytes{job=\"$job\"}", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "C" + } + ], + "title": "Total size of all notifications", + "type": "timeseries" + } + ], + "title": "Execution Extensions Write-Ahead Log", + "type": "row" } ], "refresh": "5s", @@ -11322,26 +11284,6 @@ "regex": "/.*job=\\\"([^\\\"]*).*/", "type": "query" }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "query_result(reth_info{job=\"${job}\"})", - "includeAll": false, - "label": "Instance (auto-selected)", - "name": "instance", - "options": [], - "query": { - "qryType": 3, - "query": "query_result(reth_info{job=\"${job}\"})", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "type": "query" - }, { "current": {}, "includeAll": false, @@ -11356,13 +11298,13 @@ ] }, "time": { - "from": 
"now-1h", + "from": "now-12h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 2, + "version": 4, "weekStart": "" } diff --git a/etc/lighthouse.yml b/etc/lighthouse.yml index ccd520ffa26..fc76b1fc776 100644 --- a/etc/lighthouse.yml +++ b/etc/lighthouse.yml @@ -1,5 +1,5 @@ -version: "3.9" name: reth + services: lighthouse: restart: unless-stopped @@ -13,11 +13,17 @@ services: - "9000:9000/tcp" # p2p - "9000:9000/udp" # p2p volumes: - - lighthousedata:/root/.lighthouse + - lighthouse_data:/root/.lighthouse - ./jwttoken:/root/jwt:ro # For Sepolia: # - Replace `--network mainnet` with `--network sepolia` - # - Use different checkpoint sync URL: `--checkpoint-sync-url https://sepolia.checkpoint-sync.ethpandaops.io` + # - Use different checkpoint sync URL: `--checkpoint-sync-url https://checkpoint-sync.sepolia.ethpandaops.io` + # For Holesky: + # - Replace `--network mainnet` with `--network holesky` + # - Use different checkpoint sync URL: `--checkpoint-sync-url https://checkpoint-sync.holesky.ethpandaops.io` + # For Hoodi: + # - Replace `--network mainnet` with `--network hoodi` + # - Use different checkpoint sync URL: `--checkpoint-sync-url https://checkpoint-sync.hoodi.ethpandaops.io` command: > lighthouse bn --network mainnet @@ -43,5 +49,5 @@ services: - --metrics-port=9091 volumes: - lighthousedata: + lighthouse_data: driver: local diff --git a/examples/bsc-p2p/src/block_import/mod.rs b/examples/bsc-p2p/src/block_import/mod.rs index ba7820bd327..f5eff8a4316 100644 --- a/examples/bsc-p2p/src/block_import/mod.rs +++ b/examples/bsc-p2p/src/block_import/mod.rs @@ -1,6 +1,7 @@ #![allow(unused)] use handle::ImportHandle; use reth_engine_primitives::EngineTypes; +use reth_eth_wire::NewBlock; use reth_network::import::{BlockImport, BlockImportOutcome, NewBlockEvent}; use reth_network_peers::PeerId; use reth_payload_primitives::{BuiltPayload, PayloadTypes}; @@ -25,8 +26,12 @@ impl BscBlockImport { } } -impl BlockImport> for BscBlockImport { - fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockEvent>) { +impl BlockImport>> for BscBlockImport { + fn on_new_block( + &mut self, + peer_id: PeerId, + incoming_block: NewBlockEvent>>, + ) { if let NewBlockEvent::Block(block) = incoming_block { let _ = self.handle.send_block(block, peer_id); } diff --git a/examples/bsc-p2p/src/block_import/service.rs b/examples/bsc-p2p/src/block_import/service.rs index e3d05886105..e816aa70660 100644 --- a/examples/bsc-p2p/src/block_import/service.rs +++ b/examples/bsc-p2p/src/block_import/service.rs @@ -3,6 +3,7 @@ use crate::block_import::parlia::{ParliaConsensus, ParliaConsensusErr}; use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum}; use futures::{future::Either, stream::FuturesUnordered, StreamExt}; use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes}; +use reth_eth_wire::NewBlock; use reth_network::{ import::{BlockImportError, BlockImportEvent, BlockImportOutcome, BlockValidation}, message::NewBlockMessage, @@ -25,13 +26,13 @@ pub type BscBlock = <<::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block; /// Network message containing a new block -pub(crate) type BlockMsg = NewBlockMessage>; +pub(crate) type BlockMsg = NewBlockMessage>>; /// Import outcome for a block -pub(crate) type Outcome = BlockImportOutcome>; +pub(crate) type Outcome = BlockImportOutcome>>; /// Import event for a block -pub(crate) type ImportEvent = BlockImportEvent>; +pub(crate) type ImportEvent = BlockImportEvent>>; /// Future 
 type PayloadFut = Pin> + Send + Sync>>;
@@ -371,7 +372,7 @@ mod tests {
     /// Run a block import test with the given event assertion
     async fn assert_block_import(&mut self, assert_fn: F)
     where
-        F: Fn(&BlockImportEvent>) -> bool,
+        F: Fn(&BlockImportEvent>>) -> bool,
     {
         let block_msg = create_test_block();
         self.handle.send_block(block_msg, PeerId::random()).unwrap();
@@ -400,7 +401,7 @@ mod tests {
     }
 
     /// Creates a test block message
-    fn create_test_block() -> NewBlockMessage {
+    fn create_test_block() -> NewBlockMessage> {
         let block: reth_primitives::Block = Block::default();
         let new_block = NewBlock { block: block.clone(), td: U128::ZERO };
         NewBlockMessage { hash: block.header.hash_slow(), block: Arc::new(new_block) }
diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml
index 886f2509fe0..4821ef54f40 100644
--- a/examples/custom-node/Cargo.toml
+++ b/examples/custom-node/Cargo.toml
@@ -12,7 +12,7 @@ reth-codecs.workspace = true
 reth-network-peers.workspace = true
 reth-node-builder.workspace = true
 reth-optimism-forks.workspace = true
-reth-optimism-consensus.workspace = true
+reth-db-api.workspace = true
 reth-op = { workspace = true, features = ["node", "pool"] }
 reth-payload-builder.workspace = true
 reth-rpc-api.workspace = true
@@ -63,5 +63,6 @@ arbitrary = [
     "revm/arbitrary",
     "reth-ethereum/arbitrary",
     "alloy-rpc-types-engine/arbitrary",
+    "reth-db-api/arbitrary",
 ]
 default = []
diff --git a/examples/custom-node/src/consensus.rs b/examples/custom-node/src/consensus.rs
deleted file mode 100644
index b9a8d2e1636..00000000000
--- a/examples/custom-node/src/consensus.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use std::sync::Arc;
-
-use reth_node_builder::{
-    components::ConsensusBuilder, BuilderContext, FullNodeTypes, NodePrimitives, NodeTypes,
-};
-use reth_op::DepositReceipt;
-use reth_optimism_consensus::OpBeaconConsensus;
-use reth_optimism_forks::OpHardforks;
-
-#[derive(Debug, Default, Clone)]
-pub struct CustomConsensusBuilder;
-
-impl ConsensusBuilder for CustomConsensusBuilder
-where
-    Node: FullNodeTypes<
-        Types: NodeTypes<
-            ChainSpec: OpHardforks,
-            Primitives: NodePrimitives,
-        >,
-    >,
-{
-    type Consensus = Arc::ChainSpec>>;
-
-    async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result {
-        Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec())))
-    }
-}
diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs
index 69ff05171e4..0484be19d45 100644
--- a/examples/custom-node/src/engine_api.rs
+++ b/examples/custom-node/src/engine_api.rs
@@ -15,7 +15,7 @@ use reth_ethereum::node::api::{
     NodeTypes,
 };
 use reth_node_builder::rpc::EngineApiBuilder;
-use reth_op::node::node::OpStorage;
+use reth_op::node::OpStorage;
 use reth_payload_builder::PayloadStore;
 use reth_rpc_api::IntoEngineApiRpcModule;
 use reth_rpc_engine_api::EngineApiError;
diff --git a/examples/custom-node/src/evm/builder.rs b/examples/custom-node/src/evm/builder.rs
index 1fe25243752..fe7e7cf7113 100644
--- a/examples/custom-node/src/evm/builder.rs
+++ b/examples/custom-node/src/evm/builder.rs
@@ -3,6 +3,8 @@ use reth_ethereum::node::api::FullNodeTypes;
 use reth_node_builder::{components::ExecutorBuilder, BuilderContext, NodeTypes};
 use std::{future, future::Future};
 
+#[derive(Debug, Clone, Default)]
+#[non_exhaustive]
 pub struct CustomExecutorBuilder;
 
 impl ExecutorBuilder for CustomExecutorBuilder
diff --git a/examples/custom-node/src/evm/mod.rs b/examples/custom-node/src/evm/mod.rs
index 656a67ae2ae..7e4ac45c325 100644
--- a/examples/custom-node/src/evm/mod.rs
+++ b/examples/custom-node/src/evm/mod.rs
@@ -7,6 +7,7 @@ mod executor;
 
 pub use alloy::{CustomContext, CustomEvm};
 pub use assembler::CustomBlockAssembler;
+pub use builder::CustomExecutorBuilder;
 pub use config::CustomEvmConfig;
 pub use env::{CustomTxEnv, PaymentTxEnv};
 pub use executor::CustomBlockExecutor;
diff --git a/examples/custom-node/src/lib.rs b/examples/custom-node/src/lib.rs
index e6a2eab8612..a4511e204e8 100644
--- a/examples/custom-node/src/lib.rs
+++ b/examples/custom-node/src/lib.rs
@@ -7,21 +7,25 @@
 
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
+use crate::{
+    evm::CustomExecutorBuilder, pool::CustomPooledTransaction, primitives::CustomTransaction,
+};
 use chainspec::CustomChainSpec;
-use consensus::CustomConsensusBuilder;
-use engine::CustomPayloadTypes;
-use pool::CustomPoolBuilder;
 use primitives::CustomNodePrimitives;
 use reth_ethereum::node::api::{FullNodeTypes, NodeTypes};
-use reth_node_builder::{components::ComponentsBuilder, Node, NodeComponentsBuilder};
-use reth_op::node::{node::OpStorage, OpNode};
+use reth_node_builder::{
+    components::{BasicPayloadServiceBuilder, ComponentsBuilder},
+    Node,
+};
+use reth_op::node::{
+    node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder},
+    txpool, OpNode, OpPayloadTypes,
+};
 
 pub mod chainspec;
-pub mod consensus;
 pub mod engine;
 pub mod engine_api;
 pub mod evm;
-pub mod network;
 pub mod pool;
 pub mod primitives;
 
@@ -33,32 +37,32 @@ impl NodeTypes for CustomNode {
     type ChainSpec = CustomChainSpec;
     type StateCommitment = ::StateCommitment;
     type Storage = ::Storage;
-    type Payload = CustomPayloadTypes;
+    type Payload = OpPayloadTypes;
 }
 
 impl Node for CustomNode
 where
-    N: FullNodeTypes<
-        Types: NodeTypes<
-            Payload = CustomPayloadTypes,
-            ChainSpec = CustomChainSpec,
-            Primitives = CustomNodePrimitives,
-            Storage = OpStorage,
-        >,
-    >,
-    ComponentsBuilder:
-        NodeComponentsBuilder,
+    N: FullNodeTypes,
 {
-    type ComponentsBuilder =
-        ComponentsBuilder;
+    type ComponentsBuilder = ComponentsBuilder<
+        N,
+        OpPoolBuilder>,
+        BasicPayloadServiceBuilder,
+        OpNetworkBuilder,
+        CustomExecutorBuilder,
+        OpConsensusBuilder,
+    >;
     type AddOns = ();
 
     fn components_builder(&self) -> Self::ComponentsBuilder {
         ComponentsBuilder::default()
             .node_types::()
-            .pool(CustomPoolBuilder::default())
-            .consensus(CustomConsensusBuilder)
+            .pool(OpPoolBuilder::default())
+            .executor(CustomExecutorBuilder::default())
+            .payload(BasicPayloadServiceBuilder::new(OpPayloadBuilder::new(false)))
+            .network(OpNetworkBuilder::new(false, false))
+            .consensus(OpConsensusBuilder::default())
     }
 
     fn add_ons(&self) -> Self::AddOns {}
diff --git a/examples/custom-node/src/network.rs b/examples/custom-node/src/network.rs
deleted file mode 100644
index 23752f495b6..00000000000
--- a/examples/custom-node/src/network.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-use crate::{
-    chainspec::CustomChainSpec,
-    primitives::{
-        CustomHeader, CustomNodePrimitives, CustomTransaction, CustomTransactionEnvelope,
-    },
-};
-use alloy_consensus::{Block, BlockBody};
-use eyre::Result;
-use op_alloy_consensus::OpPooledTransaction;
-use reth_ethereum::{
-    chainspec::{EthChainSpec, Hardforks},
-    network::{NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives},
-    node::api::{FullNodeTypes, NodeTypes, TxTy},
-    pool::{PoolTransaction, TransactionPool},
-};
-use reth_node_builder::{components::NetworkBuilder, BuilderContext};
-use reth_op::{primitives::Extended, OpReceipt};
-
-#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
-#[non_exhaustive]
-pub struct CustomNetworkPrimitives;
-
-impl NetworkPrimitives for CustomNetworkPrimitives {
-    type BlockHeader = CustomHeader;
-    type BlockBody = BlockBody;
-    type Block = Block;
-    type BroadcastedTransaction = CustomTransaction;
-    type PooledTransaction = Extended;
-    type Receipt = OpReceipt;
-}
-
-#[derive(Default)]
-pub struct CustomNetworkBuilder {}
-
-impl CustomNetworkBuilder {
-    fn network_config(
-        &self,
-        ctx: &BuilderContext,
-    ) -> eyre::Result::Provider, CustomNetworkPrimitives>>
-    where
-        Node: FullNodeTypes>,
-    {
-        let args = &ctx.config().network;
-        let network_builder = ctx
-            .network_config_builder()?
-            // apply discovery settings
-            .apply(|mut builder| {
-                let rlpx_socket = (args.addr, args.port).into();
-                if args.discovery.disable_discovery {
-                    builder = builder.disable_discv4_discovery();
-                }
-                if !args.discovery.disable_discovery {
-                    builder = builder.discovery_v5(
-                        args.discovery.discovery_v5_builder(
-                            rlpx_socket,
-                            ctx.config()
-                                .network
-                                .resolved_bootnodes()
-                                .or_else(|| ctx.chain_spec().bootnodes())
-                                .unwrap_or_default(),
-                        ),
-                    );
-                }
-
-                builder
-            });
-
-        let network_config = ctx.build_network_config(network_builder);
-
-        Ok(network_config)
-    }
-}
-
-impl NetworkBuilder for CustomNetworkBuilder
-where
-    Node: FullNodeTypes<
-        Types: NodeTypes,
-    >,
-    Pool: TransactionPool<
-        Transaction: PoolTransaction<
-            Consensus = TxTy,
-            Pooled = Extended,
-        >,
-    > + Unpin
-        + 'static,
-{
-    type Network = NetworkHandle;
-
-    async fn build_network(self, ctx: &BuilderContext, pool: Pool) -> Result {
-        let network_config = self.network_config(ctx)?;
-        let network = NetworkManager::builder(network_config).await?;
-        let handle = ctx.start_network(network, pool);
-
-        Ok(handle)
-    }
-}
diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs
index ccc426257a5..8fda09d7129 100644
--- a/examples/custom-node/src/pool.rs
+++ b/examples/custom-node/src/pool.rs
@@ -1,211 +1,5 @@
-// use jsonrpsee::tracing::{debug, info};
 use crate::primitives::CustomTransactionEnvelope;
-use op_alloy_consensus::{interop::SafetyLevel, OpTxEnvelope};
-use reth_chain_state::CanonStateSubscriptions;
-use reth_node_builder::{
-    components::{PoolBuilder, PoolBuilderConfigOverrides},
-    node::{FullNodeTypes, NodeTypes},
-    BuilderContext, NodePrimitives,
-};
-use reth_op::{
-    node::txpool::{
-        supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL},
-        OpPooledTransaction, OpPooledTx, OpTransactionPool, OpTransactionValidator,
-    },
-    pool::{
-        blobstore::DiskFileBlobStore, CoinbaseTipOrdering, EthPoolTransaction,
-        TransactionValidationTaskExecutor,
-    },
-    primitives::Extended,
-};
-use reth_optimism_forks::OpHardforks;
+use op_alloy_consensus::OpPooledTransaction;
+use reth_ethereum::primitives::Extended;
 
-#[derive(Debug, Clone)]
-pub struct CustomPoolBuilder<
-    T = OpPooledTransaction>,
-> {
-    /// Enforced overrides that are applied to the pool config.
-    pub pool_config_overrides: PoolBuilderConfigOverrides,
-    /// Enable transaction conditionals.
-    pub enable_tx_conditional: bool,
-    /// Supervisor client url
-    pub supervisor_http: String,
-    /// Supervisor safety level
-    pub supervisor_safety_level: SafetyLevel,
-    /// Marker for the pooled transaction type.
-    _pd: core::marker::PhantomData,
-}
-
-impl Default for CustomPoolBuilder {
-    fn default() -> Self {
-        Self {
-            pool_config_overrides: Default::default(),
-            enable_tx_conditional: false,
-            supervisor_http: DEFAULT_SUPERVISOR_URL.to_string(),
-            supervisor_safety_level: SafetyLevel::CrossUnsafe,
-            _pd: Default::default(),
-        }
-    }
-}
-
-impl CustomPoolBuilder {
-    /// Sets the enable_tx_conditional flag on the pool builder.
-    pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self {
-        self.enable_tx_conditional = enable_tx_conditional;
-        self
-    }
-
-    /// Sets the [PoolBuilderConfigOverrides] on the pool builder.
-    pub fn with_pool_config_overrides(
-        mut self,
-        pool_config_overrides: PoolBuilderConfigOverrides,
-    ) -> Self {
-        self.pool_config_overrides = pool_config_overrides;
-        self
-    }
-
-    /// Sets the supervisor client
-    pub fn with_supervisor(
-        mut self,
-        supervisor_client: String,
-        supervisor_safety_level: SafetyLevel,
-    ) -> Self {
-        self.supervisor_http = supervisor_client;
-        self.supervisor_safety_level = supervisor_safety_level;
-        self
-    }
-}
-
-impl PoolBuilder for CustomPoolBuilder
-where
-    Node: FullNodeTypes>,
-    ::Primitives:
-        NodePrimitives>,
-    T: EthPoolTransaction>
-        + OpPooledTx,
-{
-    type Pool = OpTransactionPool;
-
-    async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result {
-        let Self { pool_config_overrides, .. } = self;
-        let data_dir = ctx.config().datadir();
-        let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?;
-        // supervisor used for interop
-        if ctx.chain_spec().is_interop_active_at_timestamp(ctx.head().timestamp) &&
-            self.supervisor_http == DEFAULT_SUPERVISOR_URL
-        {
-            // info!(target: "reth::cli",
-            //     url=%DEFAULT_SUPERVISOR_URL,
-            //     "Default supervisor url is used, consider changing --rollup.supervisor-http."
-            // );
-        }
-        let supervisor_client = SupervisorClient::builder(self.supervisor_http.clone())
-            .minimum_safety(self.supervisor_safety_level)
-            .build()
-            .await;
-
-        let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone())
-            .no_eip4844()
-            .with_head_timestamp(ctx.head().timestamp)
-            .kzg_settings(ctx.kzg_settings()?)
-            .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap)
-            .with_additional_tasks(
-                pool_config_overrides
-                    .additional_validation_tasks
-                    .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks),
-            )
-            .build_with_tasks(ctx.task_executor().clone(), blob_store.clone())
-            .map(|validator| {
-                OpTransactionValidator::new(validator)
-                    // In --dev mode we can't require gas fees because we're unable to decode
-                    // the L1 block info
-                    .require_l1_data_gas_fee(!ctx.config().dev.dev)
-                    .with_supervisor(supervisor_client.clone())
-            });
-
-        let transaction_pool = reth_ethereum::pool::Pool::new(
-            validator,
-            CoinbaseTipOrdering::default(),
-            blob_store,
-            pool_config_overrides.apply(ctx.pool_config()),
-        );
-        // info!(target: "reth::cli", "Transaction pool initialized";);
-
-        // spawn txpool maintenance tasks
-        {
-            let pool = transaction_pool.clone();
-            let chain_events = ctx.provider().canonical_state_stream();
-            let client = ctx.provider().clone();
-            if !ctx.config().txpool.disable_transactions_backup {
-                // Use configured backup path or default to data dir
-                let transactions_path = ctx
-                    .config()
-                    .txpool
-                    .transactions_backup_path
-                    .clone()
-                    .unwrap_or_else(|| data_dir.txpool_transactions());
-
-                let transactions_backup_config =
-                    reth_ethereum::pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path);
-
-                ctx.task_executor().spawn_critical_with_graceful_shutdown_signal(
-                    "local transactions backup task",
-                    |shutdown| {
-                        reth_ethereum::pool::maintain::backup_local_transactions_task(
-                            shutdown,
-                            pool.clone(),
-                            transactions_backup_config,
-                        )
-                    },
-                );
-            }
-
-            // spawn the main maintenance task
-            ctx.task_executor().spawn_critical(
-                "txpool maintenance task",
-                reth_ethereum::pool::maintain::maintain_transaction_pool_future(
-                    client,
-                    pool.clone(),
-                    chain_events,
-                    ctx.task_executor().clone(),
-                    reth_ethereum::pool::maintain::MaintainPoolConfig {
-                        max_tx_lifetime: pool.config().max_queued_lifetime,
-                        no_local_exemptions: transaction_pool
-                            .config()
-                            .local_transactions_config
-                            .no_exemptions,
-                        ..Default::default()
-                    },
-                ),
-            );
-            // debug!(target: "reth::cli", "Spawned txpool maintenance task");
-
-            // spawn the Op txpool maintenance task
-            let chain_events = ctx.provider().canonical_state_stream();
-            ctx.task_executor().spawn_critical(
-                "Op txpool interop maintenance task",
-                reth_op::node::txpool::maintain::maintain_transaction_pool_interop_future(
-                    pool.clone(),
-                    chain_events,
-                    supervisor_client,
-                ),
-            );
-            // debug!(target: "reth::cli", "Spawned Op interop txpool maintenance task");
-
-            if self.enable_tx_conditional {
-                // spawn the Op txpool maintenance task
-                let chain_events = ctx.provider().canonical_state_stream();
-                ctx.task_executor().spawn_critical(
-                    "Op txpool conditional maintenance task",
-                    reth_op::node::txpool::maintain::maintain_transaction_pool_conditional_future(
-                        pool,
-                        chain_events,
-                    ),
-                );
-                // debug!(target: "reth::cli", "Spawned Op conditional txpool maintenance task");
-            }
-        }
-
-        Ok(transaction_pool)
-    }
-}
+pub type CustomPooledTransaction = Extended;
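Worth noting on the pool.rs rewrite above: the bespoke `CustomPoolBuilder` gives way to a single `CustomPooledTransaction` alias that composes the stock Optimism pooled transaction with the custom envelope via `Extended`. Conceptually, `Extended` is a two-variant wrapper that dispatches to whichever transaction kind it holds; the sketch below is a self-contained stand-in to illustrate that shape, not reth's actual definition.

```rust
// Illustrative stand-in for the `Extended` composition used above; the real
// reth type forwards many more trait impls than this sketch does.
#[derive(Debug)]
enum ExtendedSketch<BuiltIn, Other> {
    // The stock variant (here: the Optimism pooled transaction).
    BuiltIn(BuiltIn),
    // The custom variant added by the example (the custom envelope).
    Other(Other),
}

impl<B: std::fmt::Debug, O: std::fmt::Debug> ExtendedSketch<B, O> {
    // Forwarding method: dispatch to whichever variant is present.
    fn describe(&self) -> String {
        match self {
            Self::BuiltIn(b) => format!("built-in: {b:?}"),
            Self::Other(o) => format!("custom: {o:?}"),
        }
    }
}

fn main() {
    let stock: ExtendedSketch<u64, &str> = ExtendedSketch::BuiltIn(7);
    let custom: ExtendedSketch<u64, &str> = ExtendedSketch::Other("envelope");
    println!("{} / {}", stock.describe(), custom.describe());
}
```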
diff --git a/examples/custom-node/src/primitives/header.rs b/examples/custom-node/src/primitives/header.rs
index 3c66e2a4fb6..884c9c4cb1c 100644
--- a/examples/custom-node/src/primitives/header.rs
+++ b/examples/custom-node/src/primitives/header.rs
@@ -167,6 +167,21 @@ impl reth_codecs::Compact for CustomHeader {
     }
 }
 
+impl reth_db_api::table::Compress for CustomHeader {
+    type Compressed = Vec;
+
+    fn compress_to_buf>(&self, buf: &mut B) {
+        let _ = Compact::to_compact(self, buf);
+    }
+}
+
+impl reth_db_api::table::Decompress for CustomHeader {
+    fn decompress(value: &[u8]) -> Result {
+        let (obj, _) = Compact::from_compact(value, value.len());
+        Ok(obj)
+    }
+}
+
 impl BlockHeader for CustomHeader {}
 
 impl BlockHeaderMut for CustomHeader {
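The two impls above wire `CustomHeader` into the database tables by delegating to its existing `Compact` encoding. A quick sanity check for this kind of glue is a round-trip test; a minimal sketch, assuming `CustomHeader` also implements `Default` and `PartialEq` (neither is shown in this diff):

```rust
#[cfg(test)]
mod compress_roundtrip {
    use super::CustomHeader;
    use reth_db_api::table::{Compress, Decompress};

    // Assumes `CustomHeader: Default + PartialEq`; if it does not hold,
    // construct a fixture header by hand instead.
    #[test]
    fn compact_roundtrip() {
        let header = CustomHeader::default();
        let mut buf = Vec::new();
        header.compress_to_buf(&mut buf);
        let decoded = CustomHeader::decompress(&buf).expect("valid compact bytes");
        assert!(decoded == header);
    }
}
```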
diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs
index 4b1f1e8824c..ce365b2c405 100644
--- a/examples/custom-node/src/primitives/tx.rs
+++ b/examples/custom-node/src/primitives/tx.rs
@@ -16,7 +16,10 @@ use reth_codecs::{
     Compact,
 };
 use reth_ethereum::primitives::{serde_bincode_compat::SerdeBincodeCompat, InMemorySize};
-use reth_op::primitives::{Extended, SignedTransaction};
+use reth_op::{
+    primitives::{Extended, SignedTransaction},
+    OpTransaction,
+};
 use revm_primitives::{Address, Bytes};
 use serde::{Deserialize, Serialize};
 
@@ -235,3 +238,9 @@ impl Compact for CustomTransactionEnvelope {
         (CustomTransactionEnvelope { inner: signed }, buf)
     }
 }
+
+impl OpTransaction for CustomTransactionEnvelope {
+    fn is_deposit(&self) -> bool {
+        false
+    }
+}
diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs
index 679e2b780bc..f5ada05ce2b 100644
--- a/examples/db-access/src/main.rs
+++ b/examples/db-access/src/main.rs
@@ -149,14 +149,6 @@ fn block_provider_example>(
         .find_block_by_hash(sealed_block.hash(), BlockSource::Any)?
         .ok_or(eyre::eyre!("block hash not found"))?;
     assert_eq!(block, block_by_hash3);
-
-    // Can query the block's ommers/uncles
-    let _ommers = provider.ommers(number.into())?;
-
-    // Can query the block's withdrawals (via the `WithdrawalsProvider`)
-    let _withdrawals =
-        provider.withdrawals_by_block(sealed_block.hash().into(), sealed_block.timestamp)?;
-
     Ok(())
 }
diff --git a/examples/engine-api-access/Cargo.toml b/examples/engine-api-access/Cargo.toml
new file mode 100644
index 00000000000..9f969135d8b
--- /dev/null
+++ b/examples/engine-api-access/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "example-engine-api-access"
+version = "0.0.0"
+publish = false
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+# reth
+reth-db = { workspace = true, features = ["op", "test-utils"] }
+reth-node-builder.workspace = true
+reth-optimism-consensus.workspace = true
+reth-tasks.workspace = true
+reth-node-api.workspace = true
+reth-rpc-api.workspace = true
+reth-tracing.workspace = true
+reth-provider.workspace = true
+reth-optimism-node.workspace = true
+reth-optimism-chainspec.workspace = true
+
+# alloy
+alloy-rpc-types-engine.workspace = true
+
+async-trait.workspace = true
+clap = { workspace = true, features = ["derive"] }
+eyre.workspace = true
+jsonrpsee.workspace = true
+futures.workspace = true
+serde_json.workspace = true
+tokio = { workspace = true, features = ["sync"] }
diff --git a/examples/engine-api-access/src/main.rs b/examples/engine-api-access/src/main.rs
new file mode 100644
index 00000000000..492074a7b8e
--- /dev/null
+++ b/examples/engine-api-access/src/main.rs
@@ -0,0 +1,49 @@
+//! Example demonstrating how to access the Engine API instance during construction.
+//!
+//! Run with
+//!
+//! ```sh
+//! cargo run -p example-engine-api-access
+//! ```
+
+use reth_db::test_utils::create_test_rw_db;
+use reth_node_builder::{EngineApiExt, FullNodeComponents, NodeBuilder, NodeConfig};
+use reth_optimism_chainspec::BASE_MAINNET;
+use reth_optimism_node::{
+    args::RollupArgs,
+    node::{OpAddOns, OpEngineValidatorBuilder},
+    OpEngineApiBuilder, OpNode,
+};
+use tokio::sync::oneshot;
+
+#[tokio::main]
+async fn main() {
+    // Op node configuration and setup
+    let config = NodeConfig::new(BASE_MAINNET.clone());
+    let db = create_test_rw_db();
+    let args = RollupArgs::default();
+    let op_node = OpNode::new(args);
+
+    let (engine_api_tx, _engine_api_rx) = oneshot::channel();
+
+    let engine_api =
+        EngineApiExt::new(OpEngineApiBuilder::::default(), move |api| {
+            let _ = engine_api_tx.send(api);
+        });
+
+    let _builder = NodeBuilder::new(config)
+        .with_database(db)
+        .with_types::()
+        .with_components(op_node.components())
+        .with_add_ons(OpAddOns::default().with_engine_api(engine_api))
+        .on_component_initialized(move |ctx| {
+            let _provider = ctx.provider();
+            Ok(())
+        })
+        .on_node_started(|_full_node| Ok(()))
+        .on_rpc_started(|_ctx, handles| {
+            let _client = handles.rpc.http_client();
+            Ok(())
+        })
+        .check_launch();
+}
diff --git a/fork.yaml b/fork.yaml
index e4d9238f2ee..8d9fa3c9906 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -4,7 +4,7 @@ footer: |
 base:
   name: reth
   url: https://github.com/paradigmxyz/reth
-  hash: 3338c5a31901967e81a9b965ea45f22adc128d47
+  hash:
 fork:
   name: scroll-reth
   url: https://github.com/scroll-tech/reth
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index 0a79f3fbe27..1cf905ff2d1 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -12,7 +12,7 @@ use reth_db_common::init::{insert_genesis_hashes, insert_genesis_history, insert
 use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus};
 use reth_ethereum_primitives::Block;
 use reth_evm::{execute::Executor, ConfigureEvm};
-use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig};
+use reth_evm_ethereum::EthEvmConfig;
 use reth_primitives_traits::{RecoveredBlock, SealedBlock};
 use reth_provider::{
     test_utils::create_test_provider_factory_with_chain_spec, BlockWriter, DatabaseProviderFactory,
@@ -212,7 +212,7 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
     // Decode blocks
     let blocks = decode_blocks(&case.blocks)?;
 
-    let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone());
+    let executor_provider = EthEvmConfig::ethereum(chain_spec.clone());
     let mut parent = genesis_block;
     let mut program_inputs = Vec::new();
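One loose end in the `engine-api-access` example above: the Engine API handle is sent through a oneshot channel, but `_engine_api_rx` is never consumed. A hedged sketch of what a consumer could look like inside `main` (the spawned task and the renamed receiver are assumptions, not part of the diff):

```rust
// Hypothetical continuation of the example's `main`: rename `_engine_api_rx`
// to `engine_api_rx` and hand the received Engine API instance to a task.
tokio::spawn(async move {
    match engine_api_rx.await {
        // `api` is the instance captured by the `EngineApiExt` callback.
        Ok(api) => {
            // From here one could exercise the engine endpoints directly,
            // e.g. drive forkchoice updates in a test harness.
            let _engine_api = api;
        }
        // The sender was dropped before the node finished constructing.
        Err(_) => eprintln!("engine api was never constructed"),
    }
});
```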